update dependencies

This commit is contained in:
Cadey Ratio 2018-01-20 09:57:11 -08:00
parent b4bb197f80
commit 3e711e63dc
38 changed files with 2256 additions and 204 deletions

307
Gopkg.lock generated
View File

@ -1,4 +1,5 @@
memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568" # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -8,7 +9,10 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
name = "github.com/Xe/ln" name = "github.com/Xe/ln"
packages = [".","ex"] packages = [
".",
"ex"
]
revision = "466e05b2ef3e48ce08a367b6aaac09ee29a124e5" revision = "466e05b2ef3e48ce08a367b6aaac09ee29a124e5"
version = "v0.1" version = "v0.1"
@ -33,7 +37,10 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/aead/chacha20" name = "github.com/aead/chacha20"
packages = [".","chacha"] packages = [
".",
"chacha"
]
revision = "8d6ce0550041f9d97e7f15ec27ed489f8bbbb0fb" revision = "8d6ce0550041f9d97e7f15ec27ed489f8bbbb0fb"
[[projects]] [[projects]]
@ -45,7 +52,10 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/alecthomas/template" name = "github.com/alecthomas/template"
packages = [".","parse"] packages = [
".",
"parse"
]
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c" revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
[[projects]] [[projects]]
@ -74,15 +84,48 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
name = "github.com/asdine/storm" name = "github.com/asdine/storm"
packages = [".","codec","codec/json","index","internal","q"] packages = [
".",
"codec",
"codec/json",
"index",
"internal",
"q"
]
revision = "68fc73b635f890fe7ba2f3b15ce80c85b28a744f" revision = "68fc73b635f890fe7ba2f3b15ce80c85b28a744f"
version = "v2.0.2" version = "v2.0.2"
[[projects]] [[projects]]
name = "github.com/aws/aws-sdk-go" name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/sts"] packages = [
revision = "fe3adbda9bc845e750e3e5767c0a14dff202b2cc" "aws",
version = "v1.12.62" "aws/awserr",
"aws/awsutil",
"aws/client",
"aws/client/metadata",
"aws/corehandlers",
"aws/credentials",
"aws/credentials/ec2rolecreds",
"aws/credentials/endpointcreds",
"aws/credentials/stscreds",
"aws/defaults",
"aws/ec2metadata",
"aws/endpoints",
"aws/request",
"aws/session",
"aws/signer/v4",
"internal/shareddefaults",
"private/protocol",
"private/protocol/query",
"private/protocol/query/queryutil",
"private/protocol/rest",
"private/protocol/restxml",
"private/protocol/xml/xmlutil",
"service/s3",
"service/sts"
]
revision = "9ed0c8de252f04ac45a65358377103d5a1aa2d92"
version = "v1.12.66"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -99,7 +142,10 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/bifurcation/mint" name = "github.com/bifurcation/mint"
packages = [".","syntax"] packages = [
".",
"syntax"
]
revision = "350f685c15fb6b89af795dafe64fad68950948e0" revision = "350f685c15fb6b89af795dafe64fad68950948e0"
[[projects]] [[projects]]
@ -159,12 +205,22 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/golang/protobuf" name = "github.com/golang/protobuf"
packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"] packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845" revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
[[projects]] [[projects]]
name = "github.com/google/gops" name = "github.com/google/gops"
packages = ["agent","internal","signal"] packages = [
"agent",
"internal",
"signal"
]
revision = "57e77c5c37da1f4e1af49f9d1fe760f146c1579e" revision = "57e77c5c37da1f4e1af49f9d1fe760f146c1579e"
version = "v0.3.2" version = "v0.3.2"
@ -183,7 +239,10 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/hashicorp/go-getter" name = "github.com/hashicorp/go-getter"
packages = [".","helper/url"] packages = [
".",
"helper/url"
]
revision = "961f56d2e93379b7d9c578e998d09257509a6f97" revision = "961f56d2e93379b7d9c578e998d09257509a6f97"
[[projects]] [[projects]]
@ -202,7 +261,7 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
branch = "master" branch = "master"
name = "github.com/hashicorp/go-plugin" name = "github.com/hashicorp/go-plugin"
packages = ["."] packages = ["."]
revision = "e37881a3f1a07fce82b3d99ce0342a72e53386bc" revision = "1fc09c47b843b73705f51ffb0520e3ac1bfecf99"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -219,30 +278,77 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/hashicorp/golang-lru" name = "github.com/hashicorp/golang-lru"
packages = [".","simplelru"] packages = [
".",
"simplelru"
]
revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6" revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/hashicorp/hcl" name = "github.com/hashicorp/hcl"
packages = [".","hcl/ast","hcl/parser","hcl/scanner","hcl/strconv","hcl/token","json/parser","json/scanner","json/token"] packages = [
".",
"hcl/ast",
"hcl/parser",
"hcl/scanner",
"hcl/strconv",
"hcl/token",
"json/parser",
"json/scanner",
"json/token"
]
revision = "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8" revision = "23c074d0eceb2b8a5bfdbb271ab780cde70f05a8"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/hashicorp/hcl2" name = "github.com/hashicorp/hcl2"
packages = ["gohcl","hcl","hcl/hclsyntax","hcl/json","hcldec","hclparse"] packages = [
revision = "883a81b4902ecdc60cd9d77eae4c228792827c13" "gohcl",
"hcl",
"hcl/hclsyntax",
"hcl/json",
"hcldec",
"hclparse"
]
revision = "83451bb547db5e521b21cdf02fc0018c5790f6d5"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/hashicorp/hil" name = "github.com/hashicorp/hil"
packages = [".","ast","parser","scanner"] packages = [
".",
"ast",
"parser",
"scanner"
]
revision = "fa9f258a92500514cc8e9c67020487709df92432" revision = "fa9f258a92500514cc8e9c67020487709df92432"
[[projects]] [[projects]]
name = "github.com/hashicorp/terraform" name = "github.com/hashicorp/terraform"
packages = ["config","config/configschema","config/hcl2shim","config/module","dag","flatmap","helper/hashcode","helper/hilmapstructure","helper/schema","moduledeps","plugin","plugin/discovery","registry","registry/regsrc","registry/response","svchost","svchost/auth","svchost/disco","terraform","tfdiags","version"] packages = [
"config",
"config/configschema",
"config/hcl2shim",
"config/module",
"dag",
"flatmap",
"helper/hashcode",
"helper/hilmapstructure",
"helper/schema",
"moduledeps",
"plugin",
"plugin/discovery",
"registry",
"registry/regsrc",
"registry/response",
"svchost",
"svchost/auth",
"svchost/disco",
"terraform",
"tfdiags",
"version"
]
revision = "a6008b8a48a749c7c167453b9cf55ffd572b9a5d" revision = "a6008b8a48a749c7c167453b9cf55ffd572b9a5d"
version = "v0.11.2" version = "v0.11.2"
@ -255,12 +361,14 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
name = "github.com/jmespath/go-jmespath" name = "github.com/jmespath/go-jmespath"
packages = ["."] packages = ["."]
revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9" revision = "0b12d6b5"
version = "0.2.2"
[[projects]] [[projects]]
name = "github.com/joho/godotenv" name = "github.com/joho/godotenv"
packages = [".","autoload"] packages = [
".",
"autoload"
]
revision = "a79fa1e548e2c689c241d10173efd51e5d689d5b" revision = "a79fa1e548e2c689c241d10173efd51e5d689d5b"
version = "v1.2.0" version = "v1.2.0"
@ -314,7 +422,19 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
name = "github.com/lucas-clemente/quic-go" name = "github.com/lucas-clemente/quic-go"
packages = [".","ackhandler","congestion","h2quic","internal/crypto","internal/flowcontrol","internal/handshake","internal/protocol","internal/utils","internal/wire","qerr"] packages = [
".",
"ackhandler",
"congestion",
"h2quic",
"internal/crypto",
"internal/flowcontrol",
"internal/handshake",
"internal/protocol",
"internal/utils",
"internal/wire",
"qerr"
]
revision = "ded0eb4f6f30a8049bd334a26ff7ff728648fe13" revision = "ded0eb4f6f30a8049bd334a26ff7ff728648fe13"
version = "v0.6.0" version = "v0.6.0"
@ -326,7 +446,15 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
name = "github.com/magefile/mage" name = "github.com/magefile/mage"
packages = ["build","mage","mg","parse","parse/srcimporter","sh","types"] packages = [
"build",
"mage",
"mg",
"parse",
"parse/srcimporter",
"sh",
"types"
]
revision = "ab3ca2f6f85577d7ec82e0a6df721147a2e737f9" revision = "ab3ca2f6f85577d7ec82e0a6df721147a2e737f9"
version = "v2.0.1" version = "v2.0.1"
@ -346,7 +474,7 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
branch = "master" branch = "master"
name = "github.com/mitchellh/cli" name = "github.com/mitchellh/cli"
packages = ["."] packages = ["."]
revision = "33edc47170b5df54d2588696d590c5e20ee583fe" revision = "518dc677a1e1222682f4e7db06721942cb8e9e4c"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -396,6 +524,12 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
packages = ["ulid"] packages = ["ulid"]
revision = "b270c2c35fc775243f87c58cf3f6969c5d9369d6" revision = "b270c2c35fc775243f87c58cf3f6969c5d9369d6"
[[projects]]
name = "github.com/oklog/run"
packages = ["."]
revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39"
version = "v1.0.0"
[[projects]] [[projects]]
name = "github.com/oklog/ulid" name = "github.com/oklog/ulid"
packages = ["."] packages = ["."]
@ -416,7 +550,12 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
name = "github.com/posener/complete" name = "github.com/posener/complete"
packages = [".","cmd","cmd/install","match"] packages = [
".",
"cmd",
"cmd/install",
"match"
]
revision = "dc2bc5a81accba8782bebea28628224643a8286a" revision = "dc2bc5a81accba8782bebea28628224643a8286a"
version = "v1.1" version = "v1.1"
@ -452,7 +591,12 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
name = "github.com/ulikunitz/xz" name = "github.com/ulikunitz/xz"
packages = [".","internal/hash","internal/xlog","lzma"] packages = [
".",
"internal/hash",
"internal/xlog",
"lzma"
]
revision = "0c6b41e72360850ca4f98dc341fd999726ea007f" revision = "0c6b41e72360850ca4f98dc341fd999726ea007f"
version = "v0.5.4" version = "v0.5.4"
@ -471,7 +615,15 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/zclconf/go-cty" name = "github.com/zclconf/go-cty"
packages = ["cty","cty/convert","cty/function","cty/function/stdlib","cty/gocty","cty/json","cty/set"] packages = [
"cty",
"cty/convert",
"cty/function",
"cty/function/stdlib",
"cty/gocty",
"cty/json",
"cty/set"
]
revision = "709e4033eeb037dc543dbc2048065dfb814ce316" revision = "709e4033eeb037dc543dbc2048065dfb814ce316"
[[projects]] [[projects]]
@ -483,25 +635,76 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/crypto" name = "golang.org/x/crypto"
packages = ["acme","acme/autocert","bcrypt","blowfish","cast5","curve25519","hkdf","nacl/secretbox","openpgp","openpgp/armor","openpgp/elgamal","openpgp/errors","openpgp/packet","openpgp/s2k","pbkdf2","poly1305","salsa20","salsa20/salsa","tea","twofish","xtea"] packages = [
revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac" "acme",
"acme/autocert",
"bcrypt",
"blowfish",
"cast5",
"curve25519",
"hkdf",
"nacl/secretbox",
"openpgp",
"openpgp/armor",
"openpgp/elgamal",
"openpgp/errors",
"openpgp/packet",
"openpgp/s2k",
"pbkdf2",
"poly1305",
"salsa20",
"salsa20/salsa",
"tea",
"twofish",
"xtea"
]
revision = "a6600008915114d9c087fad9f03d75087b1a74df"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/net" name = "golang.org/x/net"
packages = ["bpf","context","html","html/atom","http2","http2/hpack","idna","internal/iana","internal/socket","internal/timeseries","ipv4","lex/httplex","trace"] packages = [
"bpf",
"context",
"html",
"html/atom",
"http2",
"http2/hpack",
"idna",
"internal/iana",
"internal/socket",
"internal/timeseries",
"ipv4",
"lex/httplex",
"trace"
]
revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec" revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix"] packages = ["unix"]
revision = "fff93fa7cd278d84afc205751523809c464168ab" revision = "2c42eef0765b9837fbdab12011af7830f55f88f0"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/text" name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"] packages = [
"collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3" revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
[[projects]] [[projects]]
@ -512,12 +715,44 @@ memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]] [[projects]]
name = "google.golang.org/grpc" name = "google.golang.org/grpc"
packages = [".","balancer","balancer/base","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","health","health/grpc_health_v1","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"] packages = [
revision = "7cea4cc846bcf00cbb27595b07da5de875ef7de9" ".",
version = "v1.9.1" "balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"health",
"health/grpc_health_v1",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
revision = "6b51017f791ae1cfbec89c52efdf444b13b550ef"
version = "v1.9.2"
[[projects]] [[projects]]
name = "gopkg.in/alecthomas/kingpin.v2" name = "gopkg.in/alecthomas/kingpin.v2"
packages = ["."] packages = ["."]
revision = "947dcec5ba9c011838740e680966fd7087a71d0d" revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
version = "v2.2.6" version = "v2.2.6"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -1,3 +1,36 @@
Release v1.12.66 (2018-01-19)
===
### Service Client Updates
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/glue`: Updates service API and documentation
* New AWS Glue DataCatalog APIs to manage table versions and a new feature to skip archiving of the old table version when updating table.
* `service/transcribe`: Adds new service
Release v1.12.65 (2018-01-18)
===
### Service Client Updates
* `service/sagemaker`: Updates service API and documentation
* CreateTrainingJob and CreateEndpointConfig now supports KMS Key for volume encryption.
Release v1.12.64 (2018-01-17)
===
### Service Client Updates
* `service/autoscaling-plans`: Updates service documentation
* `service/ec2`: Updates service documentation
* Documentation updates for EC2
Release v1.12.63 (2018-01-17)
===
### Service Client Updates
* `service/application-autoscaling`: Updates service API and documentation
* `service/autoscaling-plans`: Adds new service
* `service/rds`: Updates service API and documentation
* With this release you can now integrate RDS DB instances with CloudWatch Logs. We have added parameters to the operations for creating and modifying DB instances (for example CreateDBInstance) to allow you to take advantage of this capability through the CLI and API. Once you enable this feature, a stream of log events will publish to CloudWatch Logs for each log type you enable.
Release v1.12.62 (2018-01-15) Release v1.12.62 (2018-01-15)
=== ===

View File

@ -1044,10 +1044,13 @@ var awsPartition = partition{
Endpoints: endpoints{ Endpoints: endpoints{
"ap-northeast-1": endpoint{}, "ap-northeast-1": endpoint{},
"ap-southeast-1": endpoint{},
"ap-southeast-2": endpoint{},
"eu-central-1": endpoint{}, "eu-central-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-east-2": endpoint{}, "us-east-2": endpoint{},
"us-west-1": endpoint{},
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },
@ -1094,6 +1097,7 @@ var awsPartition = partition{
"glue": service{ "glue": service{
Endpoints: endpoints{ Endpoints: endpoints{
"ap-northeast-1": endpoint{},
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-east-2": endpoint{}, "us-east-2": endpoint{},
@ -1490,6 +1494,7 @@ var awsPartition = partition{
Endpoints: endpoints{ Endpoints: endpoints{
"eu-west-1": endpoint{}, "eu-west-1": endpoint{},
"us-east-1": endpoint{}, "us-east-1": endpoint{},
"us-east-2": endpoint{},
"us-west-2": endpoint{}, "us-west-2": endpoint{},
}, },
}, },

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go" const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK // SDKVersion is the version of this SDK
const SDKVersion = "1.12.62" const SDKVersion = "1.12.66"

View File

@ -2,6 +2,7 @@ package plugin
import ( import (
"bufio" "bufio"
"context"
"crypto/subtle" "crypto/subtle"
"crypto/tls" "crypto/tls"
"errors" "errors"
@ -79,6 +80,7 @@ type Client struct {
client ClientProtocol client ClientProtocol
protocol Protocol protocol Protocol
logger hclog.Logger logger hclog.Logger
doneCtx context.Context
} }
// ClientConfig is the configuration used to initialize a new // ClientConfig is the configuration used to initialize a new
@ -310,7 +312,7 @@ func (c *Client) Client() (ClientProtocol, error) {
c.client, err = newRPCClient(c) c.client, err = newRPCClient(c)
case ProtocolGRPC: case ProtocolGRPC:
c.client, err = newGRPCClient(c) c.client, err = newGRPCClient(c.doneCtx, c)
default: default:
return nil, fmt.Errorf("unknown server protocol: %s", c.protocol) return nil, fmt.Errorf("unknown server protocol: %s", c.protocol)
@ -423,6 +425,9 @@ func (c *Client) Start() (addr net.Addr, err error) {
// Create the logging channel for when we kill // Create the logging channel for when we kill
c.doneLogging = make(chan struct{}) c.doneLogging = make(chan struct{})
// Create a context for when we kill
var ctxCancel context.CancelFunc
c.doneCtx, ctxCancel = context.WithCancel(context.Background())
if c.config.Reattach != nil { if c.config.Reattach != nil {
// Verify the process still exists. If not, then it is an error // Verify the process still exists. If not, then it is an error
@ -457,6 +462,9 @@ func (c *Client) Start() (addr net.Addr, err error) {
// Close the logging channel since that doesn't work on reattach // Close the logging channel since that doesn't work on reattach
close(c.doneLogging) close(c.doneLogging)
// Cancel the context
ctxCancel()
}(p.Pid) }(p.Pid)
// Set the address and process // Set the address and process
@ -535,6 +543,9 @@ func (c *Client) Start() (addr net.Addr, err error) {
// Mark that we exited // Mark that we exited
close(exitCh) close(exitCh)
// Cancel the context, marking that we exited
ctxCancel()
// Set that we exited, which takes a lock // Set that we exited, which takes a lock
c.l.Lock() c.l.Lock()
defer c.l.Unlock() defer c.l.Unlock()
@ -707,11 +718,10 @@ func (c *Client) Protocol() Protocol {
return c.protocol return c.protocol
} }
// dialer is compatible with grpc.WithDialer and creates the connection func netAddrDialer(addr net.Addr) func(string, time.Duration) (net.Conn, error) {
// to the plugin. return func(_ string, _ time.Duration) (net.Conn, error) {
func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
// Connect to the client // Connect to the client
conn, err := net.Dial(c.address.Network(), c.address.String()) conn, err := net.Dial(addr.Network(), addr.String())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -720,6 +730,18 @@ func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
tcpConn.SetKeepAlive(true) tcpConn.SetKeepAlive(true)
} }
return conn, nil
}
}
// dialer is compatible with grpc.WithDialer and creates the connection
// to the plugin.
func (c *Client) dialer(_ string, timeout time.Duration) (net.Conn, error) {
conn, err := netAddrDialer(c.address)("", timeout)
if err != nil {
return nil, err
}
// If we have a TLS config we wrap our connection. We only do this // If we have a TLS config we wrap our connection. We only do this
// for net/rpc since gRPC uses its own mechanism for TLS. // for net/rpc since gRPC uses its own mechanism for TLS.
if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil { if c.protocol == ProtocolNetRPC && c.config.TLSConfig != nil {

View File

@ -172,6 +172,50 @@ func TestClient_testInterface(t *testing.T) {
} }
} }
func TestClient_grpc_servercrash(t *testing.T) {
process := helperProcess("test-grpc")
c := NewClient(&ClientConfig{
Cmd: process,
HandshakeConfig: testHandshake,
Plugins: testPluginMap,
AllowedProtocols: []Protocol{ProtocolGRPC},
})
defer c.Kill()
if _, err := c.Start(); err != nil {
t.Fatalf("err: %s", err)
}
if v := c.Protocol(); v != ProtocolGRPC {
t.Fatalf("bad: %s", v)
}
// Grab the RPC client
client, err := c.Client()
if err != nil {
t.Fatalf("err should be nil, got %s", err)
}
// Grab the impl
raw, err := client.Dispense("test")
if err != nil {
t.Fatalf("err should be nil, got %s", err)
}
_, ok := raw.(testInterface)
if !ok {
t.Fatalf("bad: %#v", raw)
}
c.process.Kill()
select {
case <-c.doneCtx.Done():
case <-time.After(time.Second * 2):
t.Fatal("Context was not closed")
}
}
func TestClient_grpc(t *testing.T) { func TestClient_grpc(t *testing.T) {
process := helperProcess("test-grpc") process := helperProcess("test-grpc")
c := NewClient(&ClientConfig{ c := NewClient(&ClientConfig{

455
vendor/github.com/hashicorp/go-plugin/grpc_broker.go generated vendored Normal file
View File

@ -0,0 +1,455 @@
package plugin
import (
"context"
"crypto/tls"
"errors"
"fmt"
"log"
"net"
"sync"
"sync/atomic"
"time"
"github.com/oklog/run"
"google.golang.org/grpc"
"google.golang.org/grpc/credentials"
)
// streamer interface is used in the broker to send/receive connection
// information.
type streamer interface {
	// Send transmits connection info to the remote end of the stream.
	Send(*ConnInfo) error
	// Recv blocks until the remote end sends connection info.
	Recv() (*ConnInfo, error)
	// Close shuts the stream down; both implementations in this file use
	// sync.Once internally, so repeated calls are safe.
	Close()
}
// sendErr is used to pass errors back during a send.
type sendErr struct {
	i  *ConnInfo  // the connection info being sent
	ch chan error // receives the result of the stream.Send call
}
// gRPCBrokerServer is used by the plugin to start a stream and to send
// connection information to/from the plugin. Implements GRPCBrokerServer and
// streamer interfaces.
type gRPCBrokerServer struct {
	// send is used to send connection info to the gRPC stream.
	send chan *sendErr

	// recv is used to receive connection info from the gRPC stream.
	recv chan *ConnInfo

	// quit closes down the stream.
	quit chan struct{}

	// o is used to ensure we close the quit channel only once.
	o sync.Once
}
// newGRPCBrokerServer returns a gRPCBrokerServer with all channels
// initialized. The send/recv channels are unbuffered, so every exchange is
// a rendezvous with the goroutines inside StartStream.
func newGRPCBrokerServer() *gRPCBrokerServer {
	return &gRPCBrokerServer{
		send: make(chan *sendErr),
		recv: make(chan *ConnInfo),
		quit: make(chan struct{}),
	}
}
// StartStream implements the GRPCBrokerServer interface and will block until
// the quit channel is closed or the context reports Done. The stream will pass
// connection information to/from the client.
//
// Returns the first stream.Recv error once the stream breaks, or nil when
// shut down via the quit channel or stream context.
func (s *gRPCBrokerServer) StartStream(stream GRPCBroker_StartStreamServer) error {
	doneCh := stream.Context().Done()
	defer s.Close()

	// Process send stream: forward queued ConnInfo to the client and hand
	// the result back to the waiting Send caller.
	go func() {
		for {
			select {
			case <-doneCh:
				return
			case <-s.quit:
				return
			case se := <-s.send:
				err := stream.Send(se.i)
				se.ch <- err
			}
		}
	}()

	// Process receive stream: loops forever; exits only via a Recv error
	// or a shutdown signal. (The original had an unreachable `return nil`
	// after this loop; removed.)
	for {
		i, err := stream.Recv()
		if err != nil {
			return err
		}
		select {
		case <-doneCh:
			return nil
		case <-s.quit:
			return nil
		case s.recv <- i:
		}
	}
}
// Send is used by the GRPCBroker to pass connection information into the stream
// to the client.
//
// Blocks until the StartStream goroutine performs the actual stream.Send and
// reports its result; returns an error immediately if the broker is closed.
func (s *gRPCBrokerServer) Send(i *ConnInfo) error {
	ch := make(chan error)
	defer close(ch)

	select {
	case <-s.quit:
		return errors.New("broker closed")
	case s.send <- &sendErr{
		i:  i,
		ch: ch,
	}:
	}

	// Wait for the send goroutine to report the stream.Send outcome.
	return <-ch
}
// Recv is used by the GRPCBroker to pass connection information that has been
// sent from the client from the stream to the broker.
//
// Blocks until StartStream delivers the next ConnInfo, or returns an error
// if the broker has been closed.
func (s *gRPCBrokerServer) Recv() (*ConnInfo, error) {
	select {
	case <-s.quit:
		return nil, errors.New("broker closed")
	case i := <-s.recv:
		return i, nil
	}
}
// Close closes the quit channel, shutting down the stream.
// Safe to call multiple times; sync.Once guards the close.
func (s *gRPCBrokerServer) Close() {
	s.o.Do(func() {
		close(s.quit)
	})
}
// gRPCBrokerClientImpl is used by the client to start a stream and to send
// connection information to/from the client. Implements GRPCBrokerClient and
// streamer interfaces.
type gRPCBrokerClientImpl struct {
	// client is the underlying GRPC client used to make calls to the server.
	client GRPCBrokerClient

	// send is used to send connection info to the gRPC stream.
	send chan *sendErr

	// recv is used to receive connection info from the gRPC stream.
	recv chan *ConnInfo

	// quit closes down the stream.
	quit chan struct{}

	// o is used to ensure we close the quit channel only once.
	o sync.Once
}
// newGRPCBrokerClient wraps an existing gRPC client connection in a broker
// client with all channels initialized (unbuffered, matching the server side).
func newGRPCBrokerClient(conn *grpc.ClientConn) *gRPCBrokerClientImpl {
	return &gRPCBrokerClientImpl{
		client: NewGRPCBrokerClient(conn),
		send:   make(chan *sendErr),
		recv:   make(chan *ConnInfo),
		quit:   make(chan struct{}),
	}
}
// StartStream implements the GRPCBrokerClient interface and will block until
// the quit channel is closed or the context reports Done. The stream will pass
// connection information to/from the plugin.
//
// Returns the stream-open or first Recv error, or nil on orderly shutdown.
func (s *gRPCBrokerClientImpl) StartStream() error {
	ctx, cancelFunc := context.WithCancel(context.Background())
	defer cancelFunc()
	defer s.Close()

	// Open the bidirectional broker stream to the server.
	stream, err := s.client.StartStream(ctx)
	if err != nil {
		return err
	}
	doneCh := stream.Context().Done()

	// Process send stream: forward queued ConnInfo and hand the result
	// back to the waiting Send caller.
	go func() {
		for {
			select {
			case <-doneCh:
				return
			case <-s.quit:
				return
			case se := <-s.send:
				err := stream.Send(se.i)
				se.ch <- err
			}
		}
	}()

	// Process receive stream: loops forever; exits only via a Recv error
	// or a shutdown signal. (The original had an unreachable `return nil`
	// after this loop; removed.)
	for {
		i, err := stream.Recv()
		if err != nil {
			return err
		}
		select {
		case <-doneCh:
			return nil
		case <-s.quit:
			return nil
		case s.recv <- i:
		}
	}
}
// Send is used by the GRPCBroker to pass connection information into the stream
// to the plugin.
//
// Blocks until the StartStream goroutine performs the actual stream.Send and
// reports its result; returns an error immediately if the broker is closed.
func (s *gRPCBrokerClientImpl) Send(i *ConnInfo) error {
	ch := make(chan error)
	defer close(ch)

	select {
	case <-s.quit:
		return errors.New("broker closed")
	case s.send <- &sendErr{
		i:  i,
		ch: ch,
	}:
	}

	// Wait for the send goroutine to report the stream.Send outcome.
	return <-ch
}
// Recv is used by the GRPCBroker to pass connection information that has been
// sent from the plugin to the broker.
//
// Blocks until StartStream delivers the next ConnInfo, or returns an error
// if the broker has been closed.
func (s *gRPCBrokerClientImpl) Recv() (*ConnInfo, error) {
	select {
	case <-s.quit:
		return nil, errors.New("broker closed")
	case i := <-s.recv:
		return i, nil
	}
}
// Close closes the quit channel, shutting down the stream.
// Safe to call multiple times; sync.Once guards the close.
func (s *gRPCBrokerClientImpl) Close() {
	s.o.Do(func() {
		close(s.quit)
	})
}
// GRPCBroker is responsible for brokering connections by unique ID.
//
// It is used by plugins to create multiple gRPC connections and data
// streams between the plugin process and the host process.
//
// This allows a plugin to request a channel with a specific ID to connect to
// or accept a connection from, and the broker handles the details of
// holding these channels open while they're being negotiated.
//
// The Plugin interface has access to these for both Server and Client.
// The broker can be used by either (optionally) to reserve and connect to
// new streams. This is useful for complex args and return values,
// or anything else you might need a data stream for.
type GRPCBroker struct {
	nextId   uint32                        // incremented atomically by NextId
	streamer streamer                      // underlying send/recv stream
	streams  map[uint32]*gRPCBrokerPending // pending connections keyed by service ID
	tls      *tls.Config                   // optional TLS for brokered connections
	doneCh   chan struct{}                 // closed by Close; stops AcceptAndServe servers
	o        sync.Once                     // ensures doneCh is closed only once

	sync.Mutex // protects streams
}
// gRPCBrokerPending tracks a connection that has been announced on the
// stream but not yet dialed.
type gRPCBrokerPending struct {
	ch     chan *ConnInfo // buffered (cap 1); carries the announced conn info
	doneCh chan struct{}  // closed once the conn info has been claimed by Dial
}
// newGRPCBroker wires a streamer and an optional TLS config into a broker
// that is ready for Run.
func newGRPCBroker(s streamer, tls *tls.Config) *GRPCBroker {
	return &GRPCBroker{
		streamer: s,
		streams:  make(map[uint32]*gRPCBrokerPending),
		tls:      tls,
		doneCh:   make(chan struct{}),
	}
}
// Accept accepts a connection by ID.
//
// This should not be called multiple times with the same ID at one time.
func (b *GRPCBroker) Accept(id uint32) (net.Listener, error) {
	// Open a local listener via serverListener (defined elsewhere in this
	// package).
	listener, err := serverListener()
	if err != nil {
		return nil, err
	}

	// Advertise our listen address over the stream so the remote side can
	// dial it by this service ID.
	err = b.streamer.Send(&ConnInfo{
		ServiceId: id,
		Network:   listener.Addr().Network(),
		Address:   listener.Addr().String(),
	})
	if err != nil {
		return nil, err
	}

	return listener, nil
}
// AcceptAndServe is used to accept a specific stream ID and immediately
// serve a gRPC server on that stream ID. This is used to easily serve
// complex arguments. Each AcceptAndServe call opens a new listener socket and
// sends the connection info down the stream to the dialer. Since a new
// connection is opened every call, these calls should be used sparingly.
// Multiple gRPC server implementations can be registered to a single
// AcceptAndServe call.
//
// NOTE(review): errors from Accept are logged and swallowed; the caller gets
// no signal that serving never started.
func (b *GRPCBroker) AcceptAndServe(id uint32, s func([]grpc.ServerOption) *grpc.Server) {
	listener, err := b.Accept(id)
	if err != nil {
		log.Printf("[ERR] plugin: plugin acceptAndServe error: %s", err)
		return
	}
	defer listener.Close()

	// Serve over TLS when the broker was configured with a TLS config.
	var opts []grpc.ServerOption
	if b.tls != nil {
		opts = []grpc.ServerOption{grpc.Creds(credentials.NewTLS(b.tls))}
	}

	server := s(opts)

	// Here we use a run group to close this goroutine if the server is shutdown
	// or the broker is shutdown.
	var g run.Group
	{
		// Serve on the listener, if shutting down call GracefulStop.
		g.Add(func() error {
			return server.Serve(listener)
		}, func(err error) {
			server.GracefulStop()
		})
	}
	{
		// block on the closeCh or the doneCh. If we are shutting down close the
		// closeCh.
		closeCh := make(chan struct{})
		g.Add(func() error {
			select {
			case <-b.doneCh:
			case <-closeCh:
			}
			return nil
		}, func(err error) {
			close(closeCh)
		})
	}

	// Block until we are done
	g.Run()
}
// Close closes the stream and all servers.
//
// Closing doneCh unblocks every AcceptAndServe run group; sync.Once makes
// repeated Close calls safe. Always returns nil.
func (b *GRPCBroker) Close() error {
	b.streamer.Close()
	b.o.Do(func() {
		close(b.doneCh)
	})
	return nil
}
// Dial opens a connection by ID.
//
// Waits up to 5 seconds for the remote side to announce connection info for
// this ID, then dials it (TCP or unix socket) via the shared netAddrDialer.
func (b *GRPCBroker) Dial(id uint32) (conn *grpc.ClientConn, err error) {
	var c *ConnInfo

	// Open the stream
	p := b.getStream(id)
	select {
	case c = <-p.ch:
		// Mark the pending entry as claimed so timeoutWait stops waiting.
		close(p.doneCh)
	case <-time.After(5 * time.Second):
		return nil, fmt.Errorf("timeout waiting for connection info")
	}

	var addr net.Addr
	switch c.Network {
	case "tcp":
		addr, err = net.ResolveTCPAddr("tcp", c.Address)
	case "unix":
		addr, err = net.ResolveUnixAddr("unix", c.Address)
	default:
		err = fmt.Errorf("Unknown address type: %s", c.Address)
	}
	if err != nil {
		return nil, err
	}

	return dialGRPCConn(b.tls, netAddrDialer(addr))
}
// NextId returns a unique ID to use next.
//
// It is possible for very long-running plugin hosts to wrap this value,
// though it would require a very large amount of calls. In practice
// we've never seen it happen.
func (b *GRPCBroker) NextId() uint32 {
	// Atomic increment so concurrent callers never observe the same ID.
	// Receiver renamed m -> b for consistency with the type's other methods.
	return atomic.AddUint32(&b.nextId, 1)
}
// Run starts the brokering and should be executed in a goroutine, since it
// blocks forever, or until the session closes.
//
// Uses of GRPCBroker never need to call this. It is called internally by
// the plugin host/client.
func (b *GRPCBroker) Run() {
	for {
		// Blocks until the remote side sends ConnInfo or the stream dies.
		stream, err := b.streamer.Recv()
		if err != nil {
			// Once we receive an error, just exit
			break
		}

		// Initialize the waiter: hand the ConnInfo to whoever is (or will
		// be) blocked in Dial on this service ID. The channel has capacity
		// 1, so the non-blocking send simply drops duplicate messages.
		p := b.getStream(stream.ServiceId)
		select {
		case p.ch <- stream:
		default:
		}

		// Reap the pending entry if nobody claims it in time.
		go b.timeoutWait(stream.ServiceId, p)
	}
}
// getStream returns the pending-connection record for the given service ID,
// creating it on first use. Run and Dial may race to call this; the mutex
// guarantees both observe the same record.
func (b *GRPCBroker) getStream(id uint32) *gRPCBrokerPending {
	b.Lock()
	defer b.Unlock()

	p, ok := b.streams[id]
	if ok {
		return p
	}

	// First request for this ID: create the record. Assigning through the
	// local avoids the second map lookup the previous version did on return.
	p = &gRPCBrokerPending{
		ch:     make(chan *ConnInfo, 1),
		doneCh: make(chan struct{}),
	}
	b.streams[id] = p
	return p
}
// timeoutWait removes the pending record for id once it has either been
// claimed (doneCh is closed by Dial) or five seconds have elapsed, so
// unclaimed entries do not accumulate in the streams map.
func (b *GRPCBroker) timeoutWait(id uint32, p *gRPCBrokerPending) {
	// Wait for the stream to either be picked up and connected, or
	// for a timeout.
	select {
	case <-p.doneCh:
	case <-time.After(5 * time.Second):
	}

	b.Lock()
	defer b.Unlock()

	// Delete the stream so no one else can grab it
	delete(b.streams, id)
}

190
vendor/github.com/hashicorp/go-plugin/grpc_broker.pb.go generated vendored Normal file
View File

@ -0,0 +1,190 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: grpc_broker.proto
/*
Package plugin is a generated protocol buffer package.
It is generated from these files:
grpc_broker.proto
It has these top-level messages:
ConnInfo
*/
package plugin
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// ConnInfo is the message exchanged over the broker stream; it tells the
// other side which service ID a listener belongs to and the network/address
// to dial. This file is generated by protoc-gen-go — do not hand-edit.
type ConnInfo struct {
	ServiceId uint32 `protobuf:"varint,1,opt,name=service_id,json=serviceId" json:"service_id,omitempty"`
	Network   string `protobuf:"bytes,2,opt,name=network" json:"network,omitempty"`
	Address   string `protobuf:"bytes,3,opt,name=address" json:"address,omitempty"`
}

// Standard proto.Message plumbing emitted by protoc-gen-go.
func (m *ConnInfo) Reset()                    { *m = ConnInfo{} }
func (m *ConnInfo) String() string            { return proto.CompactTextString(m) }
func (*ConnInfo) ProtoMessage()               {}
func (*ConnInfo) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }

// GetServiceId returns ServiceId, tolerating a nil receiver.
func (m *ConnInfo) GetServiceId() uint32 {
	if m != nil {
		return m.ServiceId
	}
	return 0
}

// GetNetwork returns Network, tolerating a nil receiver.
func (m *ConnInfo) GetNetwork() string {
	if m != nil {
		return m.Network
	}
	return ""
}

// GetAddress returns Address, tolerating a nil receiver.
func (m *ConnInfo) GetAddress() string {
	if m != nil {
		return m.Address
	}
	return ""
}

// init registers the ConnInfo message type with the proto runtime.
func init() {
	proto.RegisterType((*ConnInfo)(nil), "plugin.ConnInfo")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for GRPCBroker service

// GRPCBrokerClient is the generated client interface; its single method
// opens the bidirectional ConnInfo stream.
type GRPCBrokerClient interface {
	StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error)
}

type gRPCBrokerClient struct {
	cc *grpc.ClientConn
}

// NewGRPCBrokerClient wraps an existing client connection in the generated
// client implementation.
func NewGRPCBrokerClient(cc *grpc.ClientConn) GRPCBrokerClient {
	return &gRPCBrokerClient{cc}
}

// StartStream opens the bidirectional stream declared as the first (and
// only) entry of the service descriptor's Streams slice.
func (c *gRPCBrokerClient) StartStream(ctx context.Context, opts ...grpc.CallOption) (GRPCBroker_StartStreamClient, error) {
	stream, err := grpc.NewClientStream(ctx, &_GRPCBroker_serviceDesc.Streams[0], c.cc, "/plugin.GRPCBroker/StartStream", opts...)
	if err != nil {
		return nil, err
	}
	x := &gRPCBrokerStartStreamClient{stream}
	return x, nil
}

// GRPCBroker_StartStreamClient is the typed view of the client stream.
type GRPCBroker_StartStreamClient interface {
	Send(*ConnInfo) error
	Recv() (*ConnInfo, error)
	grpc.ClientStream
}

type gRPCBrokerStartStreamClient struct {
	grpc.ClientStream
}

// Send marshals and writes one ConnInfo onto the stream.
func (x *gRPCBrokerStartStreamClient) Send(m *ConnInfo) error {
	return x.ClientStream.SendMsg(m)
}

// Recv reads and unmarshals one ConnInfo from the stream.
func (x *gRPCBrokerStartStreamClient) Recv() (*ConnInfo, error) {
	m := new(ConnInfo)
	if err := x.ClientStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}
// Server API for GRPCBroker service

// GRPCBrokerServer is the generated server interface for the broker service.
type GRPCBrokerServer interface {
	StartStream(GRPCBroker_StartStreamServer) error
}

// RegisterGRPCBrokerServer attaches an implementation to a gRPC server.
func RegisterGRPCBrokerServer(s *grpc.Server, srv GRPCBrokerServer) {
	s.RegisterService(&_GRPCBroker_serviceDesc, srv)
}

// _GRPCBroker_StartStream_Handler adapts the raw server stream to the typed
// interface before invoking the registered implementation.
func _GRPCBroker_StartStream_Handler(srv interface{}, stream grpc.ServerStream) error {
	return srv.(GRPCBrokerServer).StartStream(&gRPCBrokerStartStreamServer{stream})
}

// GRPCBroker_StartStreamServer is the typed view of the server stream.
type GRPCBroker_StartStreamServer interface {
	Send(*ConnInfo) error
	Recv() (*ConnInfo, error)
	grpc.ServerStream
}

type gRPCBrokerStartStreamServer struct {
	grpc.ServerStream
}

// Send marshals and writes one ConnInfo onto the stream.
func (x *gRPCBrokerStartStreamServer) Send(m *ConnInfo) error {
	return x.ServerStream.SendMsg(m)
}

// Recv reads and unmarshals one ConnInfo from the stream.
func (x *gRPCBrokerStartStreamServer) Recv() (*ConnInfo, error) {
	m := new(ConnInfo)
	if err := x.ServerStream.RecvMsg(m); err != nil {
		return nil, err
	}
	return m, nil
}

// _GRPCBroker_serviceDesc declares the single bidirectional StartStream
// stream; the service has no unary methods.
var _GRPCBroker_serviceDesc = grpc.ServiceDesc{
	ServiceName: "plugin.GRPCBroker",
	HandlerType: (*GRPCBrokerServer)(nil),
	Methods:     []grpc.MethodDesc{},
	Streams: []grpc.StreamDesc{
		{
			StreamName:    "StartStream",
			Handler:       _GRPCBroker_StartStream_Handler,
			ServerStreams: true,
			ClientStreams: true,
		},
	},
	Metadata: "grpc_broker.proto",
}
// init registers the raw descriptor bytes for grpc_broker.proto.
func init() { proto.RegisterFile("grpc_broker.proto", fileDescriptor0) }

// fileDescriptor0 is generated data (a gzipped FileDescriptorProto);
// never edit these bytes by hand.
var fileDescriptor0 = []byte{
	// 170 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0x2f, 0x2a, 0x48,
	0x8e, 0x4f, 0x2a, 0xca, 0xcf, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x2b,
	0xc8, 0x29, 0x4d, 0xcf, 0xcc, 0x53, 0x8a, 0xe5, 0xe2, 0x70, 0xce, 0xcf, 0xcb, 0xf3, 0xcc, 0x4b,
	0xcb, 0x17, 0x92, 0xe5, 0xe2, 0x2a, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0x8d, 0xcf, 0x4c, 0x91,
	0x60, 0x54, 0x60, 0xd4, 0xe0, 0x0d, 0xe2, 0x84, 0x8a, 0x78, 0xa6, 0x08, 0x49, 0x70, 0xb1, 0xe7,
	0xa5, 0x96, 0x94, 0xe7, 0x17, 0x65, 0x4b, 0x30, 0x29, 0x30, 0x6a, 0x70, 0x06, 0xc1, 0xb8, 0x20,
	0x99, 0xc4, 0x94, 0x94, 0xa2, 0xd4, 0xe2, 0x62, 0x09, 0x66, 0x88, 0x0c, 0x94, 0x6b, 0xe4, 0xcc,
	0xc5, 0xe5, 0x1e, 0x14, 0xe0, 0xec, 0x04, 0xb6, 0x5a, 0xc8, 0x94, 0x8b, 0x3b, 0xb8, 0x24, 0xb1,
	0xa8, 0x24, 0xb8, 0xa4, 0x28, 0x35, 0x31, 0x57, 0x48, 0x40, 0x0f, 0xe2, 0x08, 0x3d, 0x98, 0x0b,
	0xa4, 0x30, 0x44, 0x34, 0x18, 0x0d, 0x18, 0x93, 0xd8, 0xc0, 0x4e, 0x36, 0x06, 0x04, 0x00, 0x00,
	0xff, 0xff, 0x7b, 0x5d, 0xfb, 0xe1, 0xc7, 0x00, 0x00, 0x00,
}

View File

@ -0,0 +1,14 @@
syntax = "proto3";
package plugin;
message ConnInfo {
uint32 service_id = 1;
string network = 2;
string address = 3;
}
service GRPCBroker {
rpc StartStream(stream ConnInfo) returns (stream ConnInfo);
}

View File

@ -1,7 +1,10 @@
package plugin package plugin
import ( import (
"crypto/tls"
"fmt" "fmt"
"net"
"time"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -9,14 +12,12 @@ import (
"google.golang.org/grpc/health/grpc_health_v1" "google.golang.org/grpc/health/grpc_health_v1"
) )
// newGRPCClient creates a new GRPCClient. The Client argument is expected func dialGRPCConn(tls *tls.Config, dialer func(string, time.Duration) (net.Conn, error)) (*grpc.ClientConn, error) {
// to be successfully started already with a lock held.
func newGRPCClient(c *Client) (*GRPCClient, error) {
// Build dialing options. // Build dialing options.
opts := make([]grpc.DialOption, 0, 5) opts := make([]grpc.DialOption, 0, 5)
// We use a custom dialer so that we can connect over unix domain sockets // We use a custom dialer so that we can connect over unix domain sockets
opts = append(opts, grpc.WithDialer(c.dialer)) opts = append(opts, grpc.WithDialer(dialer))
// go-plugin expects to block the connection // go-plugin expects to block the connection
opts = append(opts, grpc.WithBlock()) opts = append(opts, grpc.WithBlock())
@ -26,11 +27,11 @@ func newGRPCClient(c *Client) (*GRPCClient, error) {
// If we have no TLS configuration set, we need to explicitly tell grpc // If we have no TLS configuration set, we need to explicitly tell grpc
// that we're connecting with an insecure connection. // that we're connecting with an insecure connection.
if c.config.TLSConfig == nil { if tls == nil {
opts = append(opts, grpc.WithInsecure()) opts = append(opts, grpc.WithInsecure())
} else { } else {
opts = append(opts, grpc.WithTransportCredentials( opts = append(opts, grpc.WithTransportCredentials(
credentials.NewTLS(c.config.TLSConfig))) credentials.NewTLS(tls)))
} }
// Connect. Note the first parameter is unused because we use a custom // Connect. Note the first parameter is unused because we use a custom
@ -40,9 +41,28 @@ func newGRPCClient(c *Client) (*GRPCClient, error) {
return nil, err return nil, err
} }
return conn, nil
}
// newGRPCClient creates a new GRPCClient. The Client argument is expected
// to be successfully started already with a lock held.
func newGRPCClient(doneCtx context.Context, c *Client) (*GRPCClient, error) {
conn, err := dialGRPCConn(c.config.TLSConfig, c.dialer)
if err != nil {
return nil, err
}
// Start the broker.
brokerGRPCClient := newGRPCBrokerClient(conn)
broker := newGRPCBroker(brokerGRPCClient, c.config.TLSConfig)
go broker.Run()
go brokerGRPCClient.StartStream()
return &GRPCClient{ return &GRPCClient{
Conn: conn, Conn: conn,
Plugins: c.config.Plugins, Plugins: c.config.Plugins,
doneCtx: doneCtx,
broker: broker,
}, nil }, nil
} }
@ -50,10 +70,14 @@ func newGRPCClient(c *Client) (*GRPCClient, error) {
type GRPCClient struct { type GRPCClient struct {
Conn *grpc.ClientConn Conn *grpc.ClientConn
Plugins map[string]Plugin Plugins map[string]Plugin
doneCtx context.Context
broker *GRPCBroker
} }
// ClientProtocol impl. // ClientProtocol impl.
func (c *GRPCClient) Close() error { func (c *GRPCClient) Close() error {
c.broker.Close()
return c.Conn.Close() return c.Conn.Close()
} }
@ -69,7 +93,7 @@ func (c *GRPCClient) Dispense(name string) (interface{}, error) {
return nil, fmt.Errorf("plugin %q doesn't support gRPC", name) return nil, fmt.Errorf("plugin %q doesn't support gRPC", name)
} }
return p.GRPCClient(c.Conn) return p.GRPCClient(c.doneCtx, c.broker, c.Conn)
} }
// ClientProtocol impl. // ClientProtocol impl.

View File

@ -1,7 +1,11 @@
package plugin package plugin
import ( import (
"context"
"testing" "testing"
"github.com/hashicorp/go-plugin/test/grpc"
"google.golang.org/grpc"
) )
func TestGRPCClient_App(t *testing.T) { func TestGRPCClient_App(t *testing.T) {
@ -24,6 +28,27 @@ func TestGRPCClient_App(t *testing.T) {
if result != 42 { if result != 42 {
t.Fatalf("bad: %#v", result) t.Fatalf("bad: %#v", result)
} }
err = impl.Bidirectional()
if err != nil {
t.Fatal(err)
}
}
func TestGRPCConn_BidirectionalPing(t *testing.T) {
conn, _ := TestGRPCConn(t, func(s *grpc.Server) {
grpctest.RegisterPingPongServer(s, &pingPongServer{})
})
defer conn.Close()
pingPongClient := grpctest.NewPingPongClient(conn)
pResp, err := pingPongClient.Ping(context.Background(), &grpctest.PingRequest{})
if err != nil {
t.Fatal(err)
}
if pResp.Msg != "pong" {
t.Fatal("Bad PingPong")
}
} }
func TestGRPCClient_Ping(t *testing.T) { func TestGRPCClient_Ping(t *testing.T) {

View File

@ -51,6 +51,7 @@ type GRPCServer struct {
config GRPCServerConfig config GRPCServerConfig
server *grpc.Server server *grpc.Server
broker *GRPCBroker
} }
// ServerProtocol impl. // ServerProtocol impl.
@ -68,6 +69,12 @@ func (s *GRPCServer) Init() error {
GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING) GRPCServiceName, grpc_health_v1.HealthCheckResponse_SERVING)
grpc_health_v1.RegisterHealthServer(s.server, healthCheck) grpc_health_v1.RegisterHealthServer(s.server, healthCheck)
// Register the broker service
brokerServer := newGRPCBrokerServer()
RegisterGRPCBrokerServer(s.server, brokerServer)
s.broker = newGRPCBroker(brokerServer, s.TLS)
go s.broker.Run()
// Register all our plugins onto the gRPC server. // Register all our plugins onto the gRPC server.
for k, raw := range s.Plugins { for k, raw := range s.Plugins {
p, ok := raw.(GRPCPlugin) p, ok := raw.(GRPCPlugin)
@ -75,7 +82,7 @@ func (s *GRPCServer) Init() error {
return fmt.Errorf("%q is not a GRPC-compatibile plugin", k) return fmt.Errorf("%q is not a GRPC-compatibile plugin", k)
} }
if err := p.GRPCServer(s.server); err != nil { if err := p.GRPCServer(s.broker, s.server); err != nil {
return fmt.Errorf("error registring %q: %s", k, err) return fmt.Errorf("error registring %q: %s", k, err)
} }
} }

View File

@ -9,6 +9,7 @@
package plugin package plugin
import ( import (
"context"
"errors" "errors"
"net/rpc" "net/rpc"
@ -33,11 +34,12 @@ type GRPCPlugin interface {
// GRPCServer should register this plugin for serving with the // GRPCServer should register this plugin for serving with the
// given GRPCServer. Unlike Plugin.Server, this is only called once // given GRPCServer. Unlike Plugin.Server, this is only called once
// since gRPC plugins serve singletons. // since gRPC plugins serve singletons.
GRPCServer(*grpc.Server) error GRPCServer(*GRPCBroker, *grpc.Server) error
// GRPCClient should return the interface implementation for the plugin // GRPCClient should return the interface implementation for the plugin
// you're serving via gRPC. // you're serving via gRPC. The provided context will be canceled by
GRPCClient(*grpc.ClientConn) (interface{}, error) // go-plugin in the event of the plugin process exiting.
GRPCClient(context.Context, *GRPCBroker, *grpc.ClientConn) (interface{}, error)
} }
// NetRPCUnsupportedPlugin implements Plugin but returns errors for the // NetRPCUnsupportedPlugin implements Plugin but returns errors for the

View File

@ -3,6 +3,7 @@ package plugin
import ( import (
"crypto/tls" "crypto/tls"
"crypto/x509" "crypto/x509"
"errors"
"fmt" "fmt"
"io/ioutil" "io/ioutil"
"log" "log"
@ -32,6 +33,7 @@ var testHandshake = HandshakeConfig{
type testInterface interface { type testInterface interface {
Double(int) int Double(int) int
PrintKV(string, interface{}) PrintKV(string, interface{})
Bidirectional() error
} }
// testInterfacePlugin is the implementation of Plugin to create // testInterfacePlugin is the implementation of Plugin to create
@ -48,13 +50,13 @@ func (p *testInterfacePlugin) Client(b *MuxBroker, c *rpc.Client) (interface{},
return &testInterfaceClient{Client: c}, nil return &testInterfaceClient{Client: c}, nil
} }
func (p *testInterfacePlugin) GRPCServer(s *grpc.Server) error { func (p *testInterfacePlugin) GRPCServer(b *GRPCBroker, s *grpc.Server) error {
grpctest.RegisterTestServer(s, &testGRPCServer{Impl: p.impl()}) grpctest.RegisterTestServer(s, &testGRPCServer{broker: b, Impl: p.impl()})
return nil return nil
} }
func (p *testInterfacePlugin) GRPCClient(c *grpc.ClientConn) (interface{}, error) { func (p *testInterfacePlugin) GRPCClient(doneCtx context.Context, b *GRPCBroker, c *grpc.ClientConn) (interface{}, error) {
return &testGRPCClient{Client: grpctest.NewTestClient(c)}, nil return &testGRPCClient{broker: b, Client: grpctest.NewTestClient(c)}, nil
} }
func (p *testInterfacePlugin) impl() testInterface { func (p *testInterfacePlugin) impl() testInterface {
@ -82,6 +84,10 @@ func (i *testInterfaceImpl) PrintKV(key string, value interface{}) {
i.logger.Info("PrintKV called", key, value) i.logger.Info("PrintKV called", key, value)
} }
func (i *testInterfaceImpl) Bidirectional() error {
return nil
}
// testInterfaceClient implements testInterface to communicate over RPC // testInterfaceClient implements testInterface to communicate over RPC
type testInterfaceClient struct { type testInterfaceClient struct {
Client *rpc.Client Client *rpc.Client
@ -107,6 +113,10 @@ func (impl *testInterfaceClient) PrintKV(key string, value interface{}) {
} }
} }
func (impl *testInterfaceClient) Bidirectional() error {
return nil
}
// testInterfaceServer is the RPC server for testInterfaceClient // testInterfaceServer is the RPC server for testInterfaceClient
type testInterfaceServer struct { type testInterfaceServer struct {
Broker *MuxBroker Broker *MuxBroker
@ -131,6 +141,7 @@ var testPluginMap = map[string]Plugin{
// testGRPCServer is the implementation of our GRPC service. // testGRPCServer is the implementation of our GRPC service.
type testGRPCServer struct { type testGRPCServer struct {
Impl testInterface Impl testInterface
broker *GRPCBroker
} }
func (s *testGRPCServer) Double( func (s *testGRPCServer) Double(
@ -160,10 +171,46 @@ func (s *testGRPCServer) PrintKV(
return &grpctest.PrintKVResponse{}, nil return &grpctest.PrintKVResponse{}, nil
} }
// Bidirectional exercises broker callbacks in both directions: it dials the
// PingPong server the client registered under req.Id, verifies it answers
// "pong", then serves its own PingPong server on a fresh broker ID and
// returns that ID for the client to dial back.
func (s *testGRPCServer) Bidirectional(ctx context.Context, req *grpctest.BidirectionalRequest) (*grpctest.BidirectionalResponse, error) {
	// Dial back to the client's PingPong server via the broker.
	conn, err := s.broker.Dial(req.Id)
	if err != nil {
		return nil, err
	}

	pingPongClient := grpctest.NewPingPongClient(conn)
	resp, err := pingPongClient.Ping(ctx, &grpctest.PingRequest{})
	if err != nil {
		return nil, err
	}

	if resp.Msg != "pong" {
		return nil, errors.New("Bad PingPong")
	}

	// Serve our own PingPong instance on a new broker ID; AcceptAndServe
	// blocks, so it runs in its own goroutine.
	nextID := s.broker.NextId()
	go s.broker.AcceptAndServe(nextID, func(opts []grpc.ServerOption) *grpc.Server {
		s := grpc.NewServer(opts...)
		grpctest.RegisterPingPongServer(s, &pingPongServer{})
		return s
	})

	// Hand the ID back so the remote side can dial us.
	return &grpctest.BidirectionalResponse{
		Id: nextID,
	}, nil
}
// pingPongServer is the minimal PingPong implementation used by the
// bidirectional broker tests; every Ping is answered with "pong".
type pingPongServer struct{}

// Ping implements the PingPong service.
func (p *pingPongServer) Ping(ctx context.Context, req *grpctest.PingRequest) (*grpctest.PongResponse, error) {
	resp := &grpctest.PongResponse{Msg: "pong"}
	return resp, nil
}
// testGRPCClient is an implementation of TestInterface that communicates // testGRPCClient is an implementation of TestInterface that communicates
// over gRPC. // over gRPC.
type testGRPCClient struct { type testGRPCClient struct {
Client grpctest.TestClient Client grpctest.TestClient
broker *GRPCBroker
} }
func (c *testGRPCClient) Double(v int) int { func (c *testGRPCClient) Double(v int) int {
@ -200,6 +247,37 @@ func (c *testGRPCClient) PrintKV(key string, value interface{}) {
} }
} }
// Bidirectional implements testInterface over gRPC: it serves a PingPong
// server on a new broker ID for the remote side to call, invokes the remote
// Bidirectional RPC with that ID, then dials the PingPong server the remote
// side stood up and verifies it answers "pong".
func (c *testGRPCClient) Bidirectional() error {
	// Serve a PingPong server the remote end can dial; AcceptAndServe
	// blocks, so it runs in its own goroutine.
	nextID := c.broker.NextId()
	go c.broker.AcceptAndServe(nextID, func(opts []grpc.ServerOption) *grpc.Server {
		s := grpc.NewServer(opts...)
		grpctest.RegisterPingPongServer(s, &pingPongServer{})
		return s
	})

	resp, err := c.Client.Bidirectional(context.Background(), &grpctest.BidirectionalRequest{
		Id: nextID,
	})
	if err != nil {
		return err
	}

	// Dial the PingPong server the remote side created for us.
	conn, err := c.broker.Dial(resp.Id)
	if err != nil {
		return err
	}

	pingPongClient := grpctest.NewPingPongClient(conn)
	pResp, err := pingPongClient.Ping(context.Background(), &grpctest.PingRequest{})
	if err != nil {
		return err
	}
	if pResp.Msg != "pong" {
		return errors.New("Bad PingPong")
	}
	return nil
}
func helperProcess(s ...string) *exec.Cmd { func helperProcess(s ...string) *exec.Cmd {
cs := []string{"-test.run=TestHelperProcess", "--"} cs := []string{"-test.run=TestHelperProcess", "--"}
cs = append(cs, s...) cs = append(cs, s...)

View File

@ -2,6 +2,7 @@ package plugin
import ( import (
"bytes" "bytes"
"context"
"net" "net"
"net/rpc" "net/rpc"
@ -77,6 +78,35 @@ func TestPluginRPCConn(t testing.T, ps map[string]Plugin) (*RPCClient, *RPCServe
return client, server return client, server
} }
// TestGRPCConn returns a gRPC client conn and grpc server that are connected
// together and configured. The register function is used to register services
// prior to the Serve call. This is used to test gRPC connections.
//
// NOTE(review): testing.T here appears to be a testing interface rather than
// the stdlib *testing.T (it is passed by value) — confirm against the file's
// imports.
func TestGRPCConn(t testing.T, register func(*grpc.Server)) (*grpc.ClientConn, *grpc.Server) {
	// Create a listener
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	server := grpc.NewServer()
	register(server)
	go server.Serve(l)

	// Connect to the server. WithBlock means Dial does not return until
	// the connection is established.
	conn, err := grpc.Dial(
		l.Addr().String(),
		grpc.WithBlock(),
		grpc.WithInsecure())
	if err != nil {
		t.Fatalf("err: %s", err)
	}

	// Connection successful, close the listener
	l.Close()

	return conn, server
}
// TestPluginGRPCConn returns a plugin gRPC client and server that are connected // TestPluginGRPCConn returns a plugin gRPC client and server that are connected
// together and configured. This is used to test gRPC connections. // together and configured. This is used to test gRPC connections.
func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) { func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCServer) {
@ -110,10 +140,16 @@ func TestPluginGRPCConn(t testing.T, ps map[string]Plugin) (*GRPCClient, *GRPCSe
// Connection successful, close the listener // Connection successful, close the listener
l.Close() l.Close()
brokerGRPCClient := newGRPCBrokerClient(conn)
broker := newGRPCBroker(brokerGRPCClient, nil)
go broker.Run()
go brokerGRPCClient.StartStream()
// Create the client // Create the client
client := &GRPCClient{ client := &GRPCClient{
Conn: conn, Conn: conn,
Plugins: ps, Plugins: ps,
broker: broker,
doneCtx: context.Background(),
} }
return client, server return client, server

View File

@ -627,7 +627,7 @@ Traversal:
if lit, isLit := keyExpr.(*LiteralValueExpr); isLit { if lit, isLit := keyExpr.(*LiteralValueExpr); isLit {
litKey, _ := lit.Value(nil) litKey, _ := lit.Value(nil)
rng := hcl.RangeBetween(open.Range, close.Range) rng := hcl.RangeBetween(open.Range, close.Range)
step := &hcl.TraverseIndex{ step := hcl.TraverseIndex{
Key: litKey, Key: litKey,
SrcRange: rng, SrcRange: rng,
} }
@ -1531,7 +1531,7 @@ Character:
var detail string var detail string
switch { switch {
case len(ch) == 1 && (ch[0] == '$' || ch[0] == '!'): case len(ch) == 1 && (ch[0] == '$' || ch[0] == '%'):
detail = fmt.Sprintf( detail = fmt.Sprintf(
"The characters \"\\%s\" do not form a recognized escape sequence. To escape a \"%s{\" template sequence, use \"%s%s{\".", "The characters \"\\%s\" do not form a recognized escape sequence. To escape a \"%s{\" template sequence, use \"%s%s{\".",
ch, ch, ch, ch, ch, ch, ch, ch,
@ -1562,7 +1562,7 @@ Character:
esc = esc[:0] esc = esc[:0]
continue Character continue Character
case '$', '!': case '$', '%':
switch len(esc) { switch len(esc) {
case 1: case 1:
if len(ch) == 1 && ch[0] == esc[0] { if len(ch) == 1 && ch[0] == esc[0] {
@ -1602,8 +1602,8 @@ Character:
case '$': case '$':
esc = append(esc, '$') esc = append(esc, '$')
continue Character continue Character
case '!': case '%':
esc = append(esc, '!') esc = append(esc, '%')
continue Character continue Character
} }
} }
@ -1611,6 +1611,42 @@ Character:
} }
} }
// if we still have an outstanding "esc" when we fall out here then
// the literal ended with an unterminated escape sequence, which we
// must now deal with.
if len(esc) > 0 {
if esc[0] == '\\' {
// An incomplete backslash sequence is an error, since it suggests
// that e.g. the user started writing a \uXXXX sequence but didn't
// provide enough hex digits.
diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError,
Summary: "Invalid escape sequence",
Detail: fmt.Sprintf("The characters %q do not form a recognized escape sequence.", esc),
Subject: &hcl.Range{
Filename: tok.Range.Filename,
Start: hcl.Pos{
Line: pos.Line,
Column: pos.Column,
Byte: pos.Byte,
},
End: hcl.Pos{
Line: pos.Line,
Column: pos.Column + len(esc),
Byte: pos.Byte + len(esc),
},
},
})
}
// This might also be an incomplete $${ or %%{ escape sequence, but
// that's treated as a literal rather than an error since those only
// count as escape sequences when all three characters are present.
ret = append(ret, esc...)
esc = nil
}
return string(ret), diags return string(ret), diags
} }

View File

@ -435,7 +435,7 @@ Token:
}) })
case TokenTemplateControl: case TokenTemplateControl:
// if the opener is !{~ then we want to eat any trailing whitespace // if the opener is %{~ then we want to eat any trailing whitespace
// in the preceding literal token, assuming it is indeed a literal // in the preceding literal token, assuming it is indeed a literal
// token. // token.
if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 { if canTrimPrev && len(next.Bytes) == 3 && next.Bytes[2] == '~' && len(parts) > 0 {
@ -452,7 +452,7 @@ Token:
diags = append(diags, &hcl.Diagnostic{ diags = append(diags, &hcl.Diagnostic{
Severity: hcl.DiagError, Severity: hcl.DiagError,
Summary: "Invalid template directive", Summary: "Invalid template directive",
Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a !{ sequence.", Detail: "A template directive keyword (\"if\", \"for\", etc) is expected at the beginning of a %{ sequence.",
Subject: &kw.Range, Subject: &kw.Range,
Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(), Context: hcl.RangeBetween(next.Range, kw.Range).Ptr(),
}) })

View File

@ -465,6 +465,256 @@ block "valid" {}
}, },
}, },
}, },
{
"a = \"hello $${true}\"\n",
0,
&Body{
Attributes: Attributes{
"a": {
Name: "a",
Expr: &TemplateExpr{
Parts: []Expression{
&LiteralValueExpr{
Val: cty.StringVal("hello ${true}"),
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 6, Byte: 5},
End: hcl.Pos{Line: 1, Column: 20, Byte: 19},
},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 5, Byte: 4},
End: hcl.Pos{Line: 1, Column: 21, Byte: 20},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 21, Byte: 20},
},
NameRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 2, Byte: 1},
},
EqualsRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 3, Byte: 2},
End: hcl.Pos{Line: 1, Column: 4, Byte: 3},
},
},
},
Blocks: Blocks{},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 2, Column: 1, Byte: 21},
},
EndRange: hcl.Range{
Start: hcl.Pos{Line: 2, Column: 1, Byte: 21},
End: hcl.Pos{Line: 2, Column: 1, Byte: 21},
},
},
},
{
"a = \"hello %%{true}\"\n",
0,
&Body{
Attributes: Attributes{
"a": {
Name: "a",
Expr: &TemplateExpr{
Parts: []Expression{
&LiteralValueExpr{
Val: cty.StringVal("hello %{true}"),
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 6, Byte: 5},
End: hcl.Pos{Line: 1, Column: 20, Byte: 19},
},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 5, Byte: 4},
End: hcl.Pos{Line: 1, Column: 21, Byte: 20},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 21, Byte: 20},
},
NameRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 2, Byte: 1},
},
EqualsRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 3, Byte: 2},
End: hcl.Pos{Line: 1, Column: 4, Byte: 3},
},
},
},
Blocks: Blocks{},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 2, Column: 1, Byte: 21},
},
EndRange: hcl.Range{
Start: hcl.Pos{Line: 2, Column: 1, Byte: 21},
End: hcl.Pos{Line: 2, Column: 1, Byte: 21},
},
},
},
{
"a = \"hello $$\"\n",
0,
&Body{
Attributes: Attributes{
"a": {
Name: "a",
Expr: &TemplateExpr{
Parts: []Expression{
&LiteralValueExpr{
Val: cty.StringVal("hello $$"),
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 6, Byte: 5},
End: hcl.Pos{Line: 1, Column: 14, Byte: 13},
},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 5, Byte: 4},
End: hcl.Pos{Line: 1, Column: 15, Byte: 14},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 15, Byte: 14},
},
NameRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 2, Byte: 1},
},
EqualsRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 3, Byte: 2},
End: hcl.Pos{Line: 1, Column: 4, Byte: 3},
},
},
},
Blocks: Blocks{},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 2, Column: 1, Byte: 15},
},
EndRange: hcl.Range{
Start: hcl.Pos{Line: 2, Column: 1, Byte: 15},
End: hcl.Pos{Line: 2, Column: 1, Byte: 15},
},
},
},
{
"a = \"hello %%\"\n",
0,
&Body{
Attributes: Attributes{
"a": {
Name: "a",
Expr: &TemplateExpr{
Parts: []Expression{
&LiteralValueExpr{
Val: cty.StringVal("hello %%"),
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 6, Byte: 5},
End: hcl.Pos{Line: 1, Column: 14, Byte: 13},
},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 5, Byte: 4},
End: hcl.Pos{Line: 1, Column: 15, Byte: 14},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 15, Byte: 14},
},
NameRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 2, Byte: 1},
},
EqualsRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 3, Byte: 2},
End: hcl.Pos{Line: 1, Column: 4, Byte: 3},
},
},
},
Blocks: Blocks{},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 2, Column: 1, Byte: 15},
},
EndRange: hcl.Range{
Start: hcl.Pos{Line: 2, Column: 1, Byte: 15},
End: hcl.Pos{Line: 2, Column: 1, Byte: 15},
},
},
},
{
"a = \"hello!\"\n",
0,
&Body{
Attributes: Attributes{
"a": {
Name: "a",
Expr: &TemplateExpr{
Parts: []Expression{
&LiteralValueExpr{
Val: cty.StringVal("hello!"),
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 6, Byte: 5},
End: hcl.Pos{Line: 1, Column: 12, Byte: 11},
},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 5, Byte: 4},
End: hcl.Pos{Line: 1, Column: 13, Byte: 12},
},
},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 13, Byte: 12},
},
NameRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 1, Column: 2, Byte: 1},
},
EqualsRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 3, Byte: 2},
End: hcl.Pos{Line: 1, Column: 4, Byte: 3},
},
},
},
Blocks: Blocks{},
SrcRange: hcl.Range{
Start: hcl.Pos{Line: 1, Column: 1, Byte: 0},
End: hcl.Pos{Line: 2, Column: 1, Byte: 13},
},
EndRange: hcl.Range{
Start: hcl.Pos{Line: 2, Column: 1, Byte: 13},
End: hcl.Pos{Line: 2, Column: 1, Byte: 13},
},
},
},
{ {
"a = foo.bar\n", "a = foo.bar\n",
0, 0,

View File

@ -35,7 +35,7 @@ buildfuzz:
go-fuzz-build github.com/jmespath/go-jmespath/fuzz go-fuzz-build github.com/jmespath/go-jmespath/fuzz
fuzz: buildfuzz fuzz: buildfuzz
go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
bench: bench:
go test -bench . -cpuprofile cpu.out go test -bench . -cpuprofile cpu.out

View File

@ -1,5 +1,42 @@
package jmespath package jmespath
import "strconv"
// JMESPath is the representation of a compiled JMES path query. A JMESPath is
// safe for concurrent use by multiple goroutines.
type JMESPath struct {
	ast  ASTNode          // parsed form of the expression
	intr *treeInterpreter // interpreter used to evaluate the AST
}
// Compile parses a JMESPath expression and returns, if successful, a JMESPath
// object that can be used to match against data.
func Compile(expression string) (*JMESPath, error) {
	ast, err := NewParser().Parse(expression)
	if err != nil {
		return nil, err
	}
	return &JMESPath{ast: ast, intr: newInterpreter()}, nil
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled
// JMESPaths.
func MustCompile(expression string) *JMESPath {
	compiled, err := Compile(expression)
	if err != nil {
		panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
	}
	return compiled
}
// Search evaluates this precompiled JMESPath expression against input data
// and returns the result.
func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
	return jp.intr.Execute(jp.ast, data)
}
// Search evaluates a JMESPath expression against input data and returns the result. // Search evaluates a JMESPath expression against input data and returns the result.
func Search(expression string, data interface{}) (interface{}, error) { func Search(expression string, data interface{}) (interface{}, error) {
intr := newInterpreter() intr := newInterpreter()

32
vendor/github.com/jmespath/go-jmespath/api_test.go generated vendored Normal file
View File

@ -0,0 +1,32 @@
package jmespath
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestValidPrecompiledExpressionSearches(t *testing.T) {
assert := assert.New(t)
data := make(map[string]interface{})
data["foo"] = "bar"
precompiled, err := Compile("foo")
assert.Nil(err)
result, err := precompiled.Search(data)
assert.Nil(err)
assert.Equal("bar", result)
}
func TestInvalidPrecompileErrors(t *testing.T) {
assert := assert.New(t)
_, err := Compile("not a valid expression")
assert.NotNil(err)
}
func TestInvalidMustCompilePanics(t *testing.T) {
defer func() {
r := recover()
assert.NotNil(t, r)
}()
MustCompile("not a valid expression")
}

View File

@ -5,6 +5,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"math" "math"
"reflect"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
@ -124,197 +125,197 @@ type functionCaller struct {
func newFunctionCaller() *functionCaller { func newFunctionCaller() *functionCaller {
caller := &functionCaller{} caller := &functionCaller{}
caller.functionTable = map[string]functionEntry{ caller.functionTable = map[string]functionEntry{
"length": functionEntry{ "length": {
name: "length", name: "length",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpString, jpArray, jpObject}}, {types: []jpType{jpString, jpArray, jpObject}},
}, },
handler: jpfLength, handler: jpfLength,
}, },
"starts_with": functionEntry{ "starts_with": {
name: "starts_with", name: "starts_with",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpString}}, {types: []jpType{jpString}},
argSpec{types: []jpType{jpString}}, {types: []jpType{jpString}},
}, },
handler: jpfStartsWith, handler: jpfStartsWith,
}, },
"abs": functionEntry{ "abs": {
name: "abs", name: "abs",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpNumber}}, {types: []jpType{jpNumber}},
}, },
handler: jpfAbs, handler: jpfAbs,
}, },
"avg": functionEntry{ "avg": {
name: "avg", name: "avg",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArrayNumber}}, {types: []jpType{jpArrayNumber}},
}, },
handler: jpfAvg, handler: jpfAvg,
}, },
"ceil": functionEntry{ "ceil": {
name: "ceil", name: "ceil",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpNumber}}, {types: []jpType{jpNumber}},
}, },
handler: jpfCeil, handler: jpfCeil,
}, },
"contains": functionEntry{ "contains": {
name: "contains", name: "contains",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArray, jpString}}, {types: []jpType{jpArray, jpString}},
argSpec{types: []jpType{jpAny}}, {types: []jpType{jpAny}},
}, },
handler: jpfContains, handler: jpfContains,
}, },
"ends_with": functionEntry{ "ends_with": {
name: "ends_with", name: "ends_with",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpString}}, {types: []jpType{jpString}},
argSpec{types: []jpType{jpString}}, {types: []jpType{jpString}},
}, },
handler: jpfEndsWith, handler: jpfEndsWith,
}, },
"floor": functionEntry{ "floor": {
name: "floor", name: "floor",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpNumber}}, {types: []jpType{jpNumber}},
}, },
handler: jpfFloor, handler: jpfFloor,
}, },
"map": functionEntry{ "map": {
name: "amp", name: "amp",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpExpref}}, {types: []jpType{jpExpref}},
argSpec{types: []jpType{jpArray}}, {types: []jpType{jpArray}},
}, },
handler: jpfMap, handler: jpfMap,
hasExpRef: true, hasExpRef: true,
}, },
"max": functionEntry{ "max": {
name: "max", name: "max",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, {types: []jpType{jpArrayNumber, jpArrayString}},
}, },
handler: jpfMax, handler: jpfMax,
}, },
"merge": functionEntry{ "merge": {
name: "merge", name: "merge",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpObject}, variadic: true}, {types: []jpType{jpObject}, variadic: true},
}, },
handler: jpfMerge, handler: jpfMerge,
}, },
"max_by": functionEntry{ "max_by": {
name: "max_by", name: "max_by",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArray}}, {types: []jpType{jpArray}},
argSpec{types: []jpType{jpExpref}}, {types: []jpType{jpExpref}},
}, },
handler: jpfMaxBy, handler: jpfMaxBy,
hasExpRef: true, hasExpRef: true,
}, },
"sum": functionEntry{ "sum": {
name: "sum", name: "sum",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArrayNumber}}, {types: []jpType{jpArrayNumber}},
}, },
handler: jpfSum, handler: jpfSum,
}, },
"min": functionEntry{ "min": {
name: "min", name: "min",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, {types: []jpType{jpArrayNumber, jpArrayString}},
}, },
handler: jpfMin, handler: jpfMin,
}, },
"min_by": functionEntry{ "min_by": {
name: "min_by", name: "min_by",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArray}}, {types: []jpType{jpArray}},
argSpec{types: []jpType{jpExpref}}, {types: []jpType{jpExpref}},
}, },
handler: jpfMinBy, handler: jpfMinBy,
hasExpRef: true, hasExpRef: true,
}, },
"type": functionEntry{ "type": {
name: "type", name: "type",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpAny}}, {types: []jpType{jpAny}},
}, },
handler: jpfType, handler: jpfType,
}, },
"keys": functionEntry{ "keys": {
name: "keys", name: "keys",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpObject}}, {types: []jpType{jpObject}},
}, },
handler: jpfKeys, handler: jpfKeys,
}, },
"values": functionEntry{ "values": {
name: "values", name: "values",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpObject}}, {types: []jpType{jpObject}},
}, },
handler: jpfValues, handler: jpfValues,
}, },
"sort": functionEntry{ "sort": {
name: "sort", name: "sort",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArrayString, jpArrayNumber}}, {types: []jpType{jpArrayString, jpArrayNumber}},
}, },
handler: jpfSort, handler: jpfSort,
}, },
"sort_by": functionEntry{ "sort_by": {
name: "sort_by", name: "sort_by",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArray}}, {types: []jpType{jpArray}},
argSpec{types: []jpType{jpExpref}}, {types: []jpType{jpExpref}},
}, },
handler: jpfSortBy, handler: jpfSortBy,
hasExpRef: true, hasExpRef: true,
}, },
"join": functionEntry{ "join": {
name: "join", name: "join",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpString}}, {types: []jpType{jpString}},
argSpec{types: []jpType{jpArrayString}}, {types: []jpType{jpArrayString}},
}, },
handler: jpfJoin, handler: jpfJoin,
}, },
"reverse": functionEntry{ "reverse": {
name: "reverse", name: "reverse",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpArray, jpString}}, {types: []jpType{jpArray, jpString}},
}, },
handler: jpfReverse, handler: jpfReverse,
}, },
"to_array": functionEntry{ "to_array": {
name: "to_array", name: "to_array",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpAny}}, {types: []jpType{jpAny}},
}, },
handler: jpfToArray, handler: jpfToArray,
}, },
"to_string": functionEntry{ "to_string": {
name: "to_string", name: "to_string",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpAny}}, {types: []jpType{jpAny}},
}, },
handler: jpfToString, handler: jpfToString,
}, },
"to_number": functionEntry{ "to_number": {
name: "to_number", name: "to_number",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpAny}}, {types: []jpType{jpAny}},
}, },
handler: jpfToNumber, handler: jpfToNumber,
}, },
"not_null": functionEntry{ "not_null": {
name: "not_null", name: "not_null",
arguments: []argSpec{ arguments: []argSpec{
argSpec{types: []jpType{jpAny}, variadic: true}, {types: []jpType{jpAny}, variadic: true},
}, },
handler: jpfNotNull, handler: jpfNotNull,
}, },
@ -357,7 +358,7 @@ func (a *argSpec) typeCheck(arg interface{}) error {
return nil return nil
} }
case jpArray: case jpArray:
if _, ok := arg.([]interface{}); ok { if isSliceType(arg) {
return nil return nil
} }
case jpObject: case jpObject:
@ -409,8 +410,9 @@ func jpfLength(arguments []interface{}) (interface{}, error) {
arg := arguments[0] arg := arguments[0]
if c, ok := arg.(string); ok { if c, ok := arg.(string); ok {
return float64(utf8.RuneCountInString(c)), nil return float64(utf8.RuneCountInString(c)), nil
} else if c, ok := arg.([]interface{}); ok { } else if isSliceType(arg) {
return float64(len(c)), nil v := reflect.ValueOf(arg)
return float64(v.Len()), nil
} else if c, ok := arg.(map[string]interface{}); ok { } else if c, ok := arg.(map[string]interface{}); ok {
return float64(len(c)), nil return float64(len(c)), nil
} }

View File

@ -69,7 +69,7 @@ func TestCanSupportUserDefinedStructsRef(t *testing.T) {
func TestCanSupportStructWithSliceAll(t *testing.T) { func TestCanSupportStructWithSliceAll(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
result, err := Search("B[].Foo", data) result, err := Search("B[].Foo", data)
assert.Nil(err) assert.Nil(err)
assert.Equal([]interface{}{"f1", "correct"}, result) assert.Equal([]interface{}{"f1", "correct"}, result)
@ -77,7 +77,7 @@ func TestCanSupportStructWithSliceAll(t *testing.T) {
func TestCanSupportStructWithSlicingExpression(t *testing.T) { func TestCanSupportStructWithSlicingExpression(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
result, err := Search("B[:].Foo", data) result, err := Search("B[:].Foo", data)
assert.Nil(err) assert.Nil(err)
assert.Equal([]interface{}{"f1", "correct"}, result) assert.Equal([]interface{}{"f1", "correct"}, result)
@ -85,7 +85,7 @@ func TestCanSupportStructWithSlicingExpression(t *testing.T) {
func TestCanSupportStructWithFilterProjection(t *testing.T) { func TestCanSupportStructWithFilterProjection(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
result, err := Search("B[? `true` ].Foo", data) result, err := Search("B[? `true` ].Foo", data)
assert.Nil(err) assert.Nil(err)
assert.Equal([]interface{}{"f1", "correct"}, result) assert.Equal([]interface{}{"f1", "correct"}, result)
@ -93,7 +93,7 @@ func TestCanSupportStructWithFilterProjection(t *testing.T) {
func TestCanSupportStructWithSlice(t *testing.T) { func TestCanSupportStructWithSlice(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
result, err := Search("B[-1].Foo", data) result, err := Search("B[-1].Foo", data)
assert.Nil(err) assert.Nil(err)
assert.Equal("correct", result) assert.Equal("correct", result)
@ -109,7 +109,7 @@ func TestCanSupportStructWithOrExpressions(t *testing.T) {
func TestCanSupportStructWithSlicePointer(t *testing.T) { func TestCanSupportStructWithSlicePointer(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
data := sliceType{A: "foo", C: []*scalars{&scalars{"f1", "b1"}, &scalars{"correct", "b2"}}} data := sliceType{A: "foo", C: []*scalars{{"f1", "b1"}, {"correct", "b2"}}}
result, err := Search("C[-1].Foo", data) result, err := Search("C[-1].Foo", data)
assert.Nil(err) assert.Nil(err)
assert.Equal("correct", result) assert.Equal("correct", result)
@ -128,7 +128,7 @@ func TestWillAutomaticallyCapitalizeFieldNames(t *testing.T) {
func TestCanSupportStructWithSliceLowerCased(t *testing.T) { func TestCanSupportStructWithSliceLowerCased(t *testing.T) {
assert := assert.New(t) assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
result, err := Search("b[-1].foo", data) result, err := Search("b[-1].foo", data)
assert.Nil(err) assert.Nil(err)
assert.Equal("correct", result) assert.Equal("correct", result)
@ -173,6 +173,14 @@ func TestCanSupportProjectionsWithStructs(t *testing.T) {
assert.Equal([]interface{}{"first", "second", "third"}, result) assert.Equal([]interface{}{"first", "second", "third"}, result)
} }
func TestCanSupportSliceOfStructsWithFunctions(t *testing.T) {
assert := assert.New(t)
data := []scalars{scalars{"a1", "b1"}, scalars{"a2", "b2"}}
result, err := Search("length(@)", data)
assert.Nil(err)
assert.Equal(result.(float64), 2.0)
}
func BenchmarkInterpretSingleFieldStruct(b *testing.B) { func BenchmarkInterpretSingleFieldStruct(b *testing.B) {
intr := newInterpreter() intr := newInterpreter()
parser := NewParser() parser := NewParser()

View File

@ -11,63 +11,63 @@ var lexingTests = []struct {
expression string expression string
expected []token expected []token
}{ }{
{"*", []token{token{tStar, "*", 0, 1}}}, {"*", []token{{tStar, "*", 0, 1}}},
{".", []token{token{tDot, ".", 0, 1}}}, {".", []token{{tDot, ".", 0, 1}}},
{"[?", []token{token{tFilter, "[?", 0, 2}}}, {"[?", []token{{tFilter, "[?", 0, 2}}},
{"[]", []token{token{tFlatten, "[]", 0, 2}}}, {"[]", []token{{tFlatten, "[]", 0, 2}}},
{"(", []token{token{tLparen, "(", 0, 1}}}, {"(", []token{{tLparen, "(", 0, 1}}},
{")", []token{token{tRparen, ")", 0, 1}}}, {")", []token{{tRparen, ")", 0, 1}}},
{"[", []token{token{tLbracket, "[", 0, 1}}}, {"[", []token{{tLbracket, "[", 0, 1}}},
{"]", []token{token{tRbracket, "]", 0, 1}}}, {"]", []token{{tRbracket, "]", 0, 1}}},
{"{", []token{token{tLbrace, "{", 0, 1}}}, {"{", []token{{tLbrace, "{", 0, 1}}},
{"}", []token{token{tRbrace, "}", 0, 1}}}, {"}", []token{{tRbrace, "}", 0, 1}}},
{"||", []token{token{tOr, "||", 0, 2}}}, {"||", []token{{tOr, "||", 0, 2}}},
{"|", []token{token{tPipe, "|", 0, 1}}}, {"|", []token{{tPipe, "|", 0, 1}}},
{"29", []token{token{tNumber, "29", 0, 2}}}, {"29", []token{{tNumber, "29", 0, 2}}},
{"2", []token{token{tNumber, "2", 0, 1}}}, {"2", []token{{tNumber, "2", 0, 1}}},
{"0", []token{token{tNumber, "0", 0, 1}}}, {"0", []token{{tNumber, "0", 0, 1}}},
{"-20", []token{token{tNumber, "-20", 0, 3}}}, {"-20", []token{{tNumber, "-20", 0, 3}}},
{"foo", []token{token{tUnquotedIdentifier, "foo", 0, 3}}}, {"foo", []token{{tUnquotedIdentifier, "foo", 0, 3}}},
{`"bar"`, []token{token{tQuotedIdentifier, "bar", 0, 3}}}, {`"bar"`, []token{{tQuotedIdentifier, "bar", 0, 3}}},
// Escaping the delimiter // Escaping the delimiter
{`"bar\"baz"`, []token{token{tQuotedIdentifier, `bar"baz`, 0, 7}}}, {`"bar\"baz"`, []token{{tQuotedIdentifier, `bar"baz`, 0, 7}}},
{",", []token{token{tComma, ",", 0, 1}}}, {",", []token{{tComma, ",", 0, 1}}},
{":", []token{token{tColon, ":", 0, 1}}}, {":", []token{{tColon, ":", 0, 1}}},
{"<", []token{token{tLT, "<", 0, 1}}}, {"<", []token{{tLT, "<", 0, 1}}},
{"<=", []token{token{tLTE, "<=", 0, 2}}}, {"<=", []token{{tLTE, "<=", 0, 2}}},
{">", []token{token{tGT, ">", 0, 1}}}, {">", []token{{tGT, ">", 0, 1}}},
{">=", []token{token{tGTE, ">=", 0, 2}}}, {">=", []token{{tGTE, ">=", 0, 2}}},
{"==", []token{token{tEQ, "==", 0, 2}}}, {"==", []token{{tEQ, "==", 0, 2}}},
{"!=", []token{token{tNE, "!=", 0, 2}}}, {"!=", []token{{tNE, "!=", 0, 2}}},
{"`[0, 1, 2]`", []token{token{tJSONLiteral, "[0, 1, 2]", 1, 9}}}, {"`[0, 1, 2]`", []token{{tJSONLiteral, "[0, 1, 2]", 1, 9}}},
{"'foo'", []token{token{tStringLiteral, "foo", 1, 3}}}, {"'foo'", []token{{tStringLiteral, "foo", 1, 3}}},
{"'a'", []token{token{tStringLiteral, "a", 1, 1}}}, {"'a'", []token{{tStringLiteral, "a", 1, 1}}},
{`'foo\'bar'`, []token{token{tStringLiteral, "foo'bar", 1, 7}}}, {`'foo\'bar'`, []token{{tStringLiteral, "foo'bar", 1, 7}}},
{"@", []token{token{tCurrent, "@", 0, 1}}}, {"@", []token{{tCurrent, "@", 0, 1}}},
{"&", []token{token{tExpref, "&", 0, 1}}}, {"&", []token{{tExpref, "&", 0, 1}}},
// Quoted identifier unicode escape sequences // Quoted identifier unicode escape sequences
{`"\u2713"`, []token{token{tQuotedIdentifier, "✓", 0, 3}}}, {`"\u2713"`, []token{{tQuotedIdentifier, "✓", 0, 3}}},
{`"\\"`, []token{token{tQuotedIdentifier, `\`, 0, 1}}}, {`"\\"`, []token{{tQuotedIdentifier, `\`, 0, 1}}},
{"`\"foo\"`", []token{token{tJSONLiteral, "\"foo\"", 1, 5}}}, {"`\"foo\"`", []token{{tJSONLiteral, "\"foo\"", 1, 5}}},
// Combinations of tokens. // Combinations of tokens.
{"foo.bar", []token{ {"foo.bar", []token{
token{tUnquotedIdentifier, "foo", 0, 3}, {tUnquotedIdentifier, "foo", 0, 3},
token{tDot, ".", 3, 1}, {tDot, ".", 3, 1},
token{tUnquotedIdentifier, "bar", 4, 3}, {tUnquotedIdentifier, "bar", 4, 3},
}}, }},
{"foo[0]", []token{ {"foo[0]", []token{
token{tUnquotedIdentifier, "foo", 0, 3}, {tUnquotedIdentifier, "foo", 0, 3},
token{tLbracket, "[", 3, 1}, {tLbracket, "[", 3, 1},
token{tNumber, "0", 4, 1}, {tNumber, "0", 4, 1},
token{tRbracket, "]", 5, 1}, {tRbracket, "]", 5, 1},
}}, }},
{"foo[?a<b]", []token{ {"foo[?a<b]", []token{
token{tUnquotedIdentifier, "foo", 0, 3}, {tUnquotedIdentifier, "foo", 0, 3},
token{tFilter, "[?", 3, 2}, {tFilter, "[?", 3, 2},
token{tUnquotedIdentifier, "a", 5, 1}, {tUnquotedIdentifier, "a", 5, 1},
token{tLT, "<", 6, 1}, {tLT, "<", 6, 1},
token{tUnquotedIdentifier, "b", 7, 1}, {tUnquotedIdentifier, "b", 7, 1},
token{tRbracket, "]", 8, 1}, {tRbracket, "]", 8, 1},
}}, }},
} }

View File

@ -353,7 +353,7 @@ func (p *Parser) nud(token token) (ASTNode, error) {
case tFlatten: case tFlatten:
left := ASTNode{ left := ASTNode{
nodeType: ASTFlatten, nodeType: ASTFlatten,
children: []ASTNode{ASTNode{nodeType: ASTIdentity}}, children: []ASTNode{{nodeType: ASTIdentity}},
} }
right, err := p.parseProjectionRHS(bindingPowers[tFlatten]) right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
if err != nil { if err != nil {
@ -378,7 +378,7 @@ func (p *Parser) nud(token token) (ASTNode, error) {
} }
return ASTNode{ return ASTNode{
nodeType: ASTProjection, nodeType: ASTProjection,
children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right}, children: []ASTNode{{nodeType: ASTIdentity}, right},
}, nil }, nil
} else { } else {
return p.parseMultiSelectList() return p.parseMultiSelectList()

View File

@ -13,7 +13,7 @@ func TestSlicePositiveStep(t *testing.T) {
input[2] = 2 input[2] = 2
input[3] = 3 input[3] = 3
input[4] = 4 input[4] = 4
result, err := slice(input, []sliceParam{sliceParam{0, true}, sliceParam{3, true}, sliceParam{1, true}}) result, err := slice(input, []sliceParam{{0, true}, {3, true}, {1, true}})
assert.Nil(err) assert.Nil(err)
assert.Equal(input[:3], result) assert.Equal(input[:3], result)
} }

View File

@ -87,7 +87,7 @@ type CLI struct {
// should be set exactly to the binary name that is autocompleted. // should be set exactly to the binary name that is autocompleted.
// //
// Autocompletion is supported via the github.com/posener/complete // Autocompletion is supported via the github.com/posener/complete
// library. This library supports both bash and zsh. To add support // library. This library supports bash, zsh and fish. To add support
// for other shells, please see that library. // for other shells, please see that library.
// //
// AutocompleteInstall and AutocompleteUninstall are the global flag // AutocompleteInstall and AutocompleteUninstall are the global flag

14
vendor/github.com/oklog/run/.gitignore generated vendored Normal file
View File

@ -0,0 +1,14 @@
# Binaries for programs and plugins
*.exe
*.dll
*.so
*.dylib
# Test binary, build with `go test -c`
*.test
# Output of the go coverage tool, specifically when used with LiteIDE
*.out
# Project-local glide cache, RE: https://github.com/Masterminds/glide/issues/736
.glide/

12
vendor/github.com/oklog/run/.travis.yml generated vendored Normal file
View File

@ -0,0 +1,12 @@
language: go
sudo: false
go:
- 1.x
- tip
install:
- go get -v github.com/golang/lint/golint
- go build ./...
script:
- go vet ./...
- $HOME/gopath/bin/golint .
- go test -v -race ./...

201
vendor/github.com/oklog/run/LICENSE generated vendored Normal file
View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

73
vendor/github.com/oklog/run/README.md generated vendored Normal file
View File

@ -0,0 +1,73 @@
# run
[![GoDoc](https://godoc.org/github.com/oklog/run?status.svg)](https://godoc.org/github.com/oklog/run)
[![Build Status](https://travis-ci.org/oklog/run.svg?branch=master)](https://travis-ci.org/oklog/run)
[![Go Report Card](https://goreportcard.com/badge/github.com/oklog/run)](https://goreportcard.com/report/github.com/oklog/run)
[![Apache 2 licensed](https://img.shields.io/badge/license-Apache2-blue.svg)](https://raw.githubusercontent.com/oklog/run/master/LICENSE)
run.Group is a universal mechanism to manage goroutine lifecycles.
Create a zero-value run.Group, and then add actors to it. Actors are defined as
a pair of functions: an **execute** function, which should run synchronously;
and an **interrupt** function, which, when invoked, should cause the execute
function to return. Finally, invoke Run, which blocks until the first actor
returns. This general-purpose API allows callers to model pretty much any
runnable task, and achieve well-defined lifecycle semantics for the group.
run.Group was written to manage component lifecycles in func main for
[OK Log](https://github.com/oklog/oklog).
But it's useful in any circumstance where you need to orchestrate multiple
goroutines as a unified whole.
[Click here](https://www.youtube.com/watch?v=LHe1Cb_Ud_M&t=15m45s) to see a
video of a talk where run.Group is described.
## Examples
### context.Context
```go
ctx, cancel := context.WithCancel(context.Background())
g.Add(func() error {
return myProcess(ctx, ...)
}, func(error) {
cancel()
})
```
### net.Listener
```go
ln, _ := net.Listen("tcp", ":8080")
g.Add(func() error {
return http.Serve(ln, nil)
}, func(error) {
ln.Close()
})
```
### io.ReadCloser
```go
var conn io.ReadCloser = ...
g.Add(func() error {
s := bufio.NewScanner(conn)
for s.Scan() {
println(s.Text())
}
return s.Err()
}, func(error) {
conn.Close()
})
```
## Comparisons
Package run is somewhat similar to package
[errgroup](https://godoc.org/golang.org/x/sync/errgroup),
except it doesn't require actor goroutines to understand context semantics.
It's somewhat similar to package
[tomb.v1](https://godoc.org/gopkg.in/tomb.v1) or
[tomb.v2](https://godoc.org/gopkg.in/tomb.v2),
except it has a much smaller API surface, delegating e.g. staged shutdown of
goroutines to the caller.

95
vendor/github.com/oklog/run/example_test.go generated vendored Normal file
View File

@ -0,0 +1,95 @@
package run_test
import (
"context"
"errors"
"fmt"
"net"
"net/http"
"time"
"github.com/oklog/run"
)
// ExampleGroup_Add_basic runs two actors in one group: the second actor
// returns immediately, which interrupts the first actor through its
// cancel channel before the one-second timer can fire.
func ExampleGroup_Add_basic() {
	var g run.Group
	{
		// This actor waits for either its timer or a cancel signal.
		cancel := make(chan struct{})
		g.Add(func() error {
			select {
			case <-time.After(time.Second):
				fmt.Printf("The first actor had its time elapsed\n")
				return nil
			case <-cancel:
				fmt.Printf("The first actor was canceled\n")
				return nil
			}
		}, func(err error) {
			fmt.Printf("The first actor was interrupted with: %v\n", err)
			close(cancel)
		})
	}
	{
		// This actor fails straight away, tearing down the whole group.
		g.Add(func() error {
			fmt.Printf("The second actor is returning immediately\n")
			return errors.New("immediate teardown")
		}, func(err error) {
			// Note that this interrupt function is called, even though the
			// corresponding execute function has already returned.
			fmt.Printf("The second actor was interrupted with: %v\n", err)
		})
	}
	// Run blocks until every actor has exited, then reports the first error.
	fmt.Printf("The group was terminated with: %v\n", g.Run())
	// Output:
	// The second actor is returning immediately
	// The first actor was interrupted with: immediate teardown
	// The second actor was interrupted with: immediate teardown
	// The first actor was canceled
	// The group was terminated with: immediate teardown
}
// ExampleGroup_Add_context drives an actor from a context.Context:
// canceling the parent context (here from a goroutine) causes the actor
// to return, terminating the group.
func ExampleGroup_Add_context() {
	ctx, cancel := context.WithCancel(context.Background())
	var g run.Group
	{
		ctx, cancel := context.WithCancel(ctx) // note: shadowed
		g.Add(func() error {
			return runUntilCanceled(ctx)
		}, func(error) {
			cancel()
		})
	}
	go cancel()
	fmt.Printf("The group was terminated with: %v\n", g.Run())
	// Output:
	// The group was terminated with: context canceled
}
// ExampleGroup_Add_listener pairs http.Serve with its net.Listener:
// closing the listener is the interrupt that makes Serve return. A second
// actor fails immediately so the example terminates.
func ExampleGroup_Add_listener() {
	var g run.Group
	{
		ln, _ := net.Listen("tcp", ":0")
		g.Add(func() error {
			defer fmt.Printf("http.Serve returned\n")
			return http.Serve(ln, http.NewServeMux())
		}, func(error) {
			ln.Close()
		})
	}
	{
		g.Add(func() error {
			return errors.New("immediate teardown")
		}, func(error) {
			// Nothing to clean up; this actor's execute has already returned.
		})
	}
	fmt.Printf("The group was terminated with: %v\n", g.Run())
	// Output:
	// http.Serve returned
	// The group was terminated with: immediate teardown
}
func runUntilCanceled(ctx context.Context) error {
<-ctx.Done()
return ctx.Err()
}

62
vendor/github.com/oklog/run/group.go generated vendored Normal file
View File

@ -0,0 +1,62 @@
// Package run implements an actor-runner with deterministic teardown. It is
// somewhat similar to package errgroup, except it does not require actor
// goroutines to understand context semantics. This makes it suitable for use in
// more circumstances; for example, goroutines which are handling connections
// from net.Listeners, or scanning input from a closable io.Reader.
package run
// Group collects actors (functions) and runs them concurrently.
// When one actor (function) returns, all actors are interrupted.
// The zero value of a Group is useful.
type Group struct {
actors []actor
}
// Add an actor (function) to the group. Each actor must be pre-emptable by an
// interrupt function. That is, if interrupt is invoked, execute should return.
// Also, it must be safe to call interrupt even after execute has returned.
//
// The first actor (function) to return interrupts all running actors.
// The error is passed to the interrupt functions, and is returned by Run.
func (g *Group) Add(execute func() error, interrupt func(error)) {
g.actors = append(g.actors, actor{execute, interrupt})
}
// Run all actors (functions) concurrently.
// When the first actor returns, all others are interrupted.
// Run only returns when all actors have exited.
// Run returns the error returned by the first exiting actor.
func (g *Group) Run() error {
if len(g.actors) == 0 {
return nil
}
// Run each actor.
errors := make(chan error, len(g.actors))
for _, a := range g.actors {
go func(a actor) {
errors <- a.execute()
}(a)
}
// Wait for the first actor to stop.
err := <-errors
// Signal all actors to stop.
for _, a := range g.actors {
a.interrupt(err)
}
// Wait for all actors to stop.
for i := 1; i < cap(errors); i++ {
<-errors
}
// Return the original error.
return err
}
type actor struct {
execute func() error
interrupt func(error)
}

57
vendor/github.com/oklog/run/group_test.go generated vendored Normal file
View File

@ -0,0 +1,57 @@
package run_test
import (
"errors"
"testing"
"time"
"github.com/oklog/run"
)
// TestZero verifies that running an empty Group returns nil promptly.
func TestZero(t *testing.T) {
	var g run.Group
	result := make(chan error)
	go func() { result <- g.Run() }()
	select {
	case <-time.After(100 * time.Millisecond):
		t.Error("timeout")
	case err := <-result:
		if err != nil {
			t.Errorf("%v", err)
		}
	}
}
// TestOne verifies that a single failing actor's error is the value
// returned by Run.
func TestOne(t *testing.T) {
	sentinel := errors.New("foobar")
	var g run.Group
	g.Add(func() error { return sentinel }, func(error) {})
	result := make(chan error)
	go func() { result <- g.Run() }()
	select {
	case <-time.After(100 * time.Millisecond):
		t.Error("timeout")
	case have := <-result:
		if have != sentinel {
			t.Errorf("want %v, have %v", sentinel, have)
		}
	}
}
// TestMany verifies that the first actor's error interrupts its peers and
// is the error reported by Run once everyone has exited.
func TestMany(t *testing.T) {
	sentinel := errors.New("interrupt")
	var g run.Group
	g.Add(func() error { return sentinel }, func(error) {})
	stop := make(chan struct{})
	g.Add(func() error { <-stop; return nil }, func(error) { close(stop) })
	result := make(chan error)
	go func() { result <- g.Run() }()
	select {
	case <-time.After(100 * time.Millisecond):
		t.Errorf("timeout")
	case have := <-result:
		if have != sentinel {
			t.Errorf("want %v, have %v", sentinel, have)
		}
	}
}

View File

@ -72,7 +72,7 @@ func main() {
b = removeFieldsRegex.ReplaceAll(b, []byte("_")) b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
// Remove padding, hidden, or unused fields // Remove padding, hidden, or unused fields
removeFieldsRegex = regexp.MustCompile(`X_\S+`) removeFieldsRegex = regexp.MustCompile(`\bX_\S+`)
b = removeFieldsRegex.ReplaceAll(b, []byte("_")) b = removeFieldsRegex.ReplaceAll(b, []byte("_"))
} }

View File

@ -702,9 +702,9 @@ const (
AT_NO_AUTOMOUNT = 0x800 AT_NO_AUTOMOUNT = 0x800
AT_REMOVEDIR = 0x200 AT_REMOVEDIR = 0x200
AT_STAT_ = 0x0 AT_STATX_SYNC_AS_STAT = 0x0
AT_STAT_ = 0x2000 AT_STATX_FORCE_SYNC = 0x2000
AT_STAT_ = 0x4000 AT_STATX_DONT_SYNC = 0x4000
AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_FOLLOW = 0x400
AT_SYMLINK_NOFOLLOW = 0x100 AT_SYMLINK_NOFOLLOW = 0x100

View File

@ -500,6 +500,6 @@ const (
) )
// Version is the current grpc version. // Version is the current grpc version.
const Version = "1.9.1" const Version = "1.9.2"
const grpcUA = "grpc-go/" + Version const grpcUA = "grpc-go/" + Version

View File

@ -645,6 +645,8 @@ func (t *http2Client) Write(s *Stream, hdr []byte, data []byte, opts *Options) e
select { select {
case <-s.ctx.Done(): case <-s.ctx.Done():
return ContextErr(s.ctx.Err()) return ContextErr(s.ctx.Err())
case <-s.done:
return io.EOF
case <-t.ctx.Done(): case <-t.ctx.Done():
return ErrConnClosing return ErrConnClosing
default: default:
@ -1110,13 +1112,14 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) {
}() }()
s.mu.Lock() s.mu.Lock()
if !s.headerDone {
// Headers frame is not actually a trailers-only frame.
if !endStream { if !endStream {
s.recvCompress = state.encoding s.recvCompress = state.encoding
} if len(state.mdata) > 0 {
if !s.headerDone {
if !endStream && len(state.mdata) > 0 {
s.header = state.mdata s.header = state.mdata
} }
}
close(s.headerChan) close(s.headerChan)
s.headerDone = true s.headerDone = true
isHeader = true isHeader = true