diff --git a/Gopkg.lock b/Gopkg.lock index 69a3421..770c503 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,5 +1,4 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - +memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568" [[projects]] branch = "master" @@ -76,14 +75,14 @@ [[projects]] name = "github.com/asdine/storm" packages = [".","codec","codec/json","index","internal","q"] - revision = "dbd37722730b6cb703b5bd825c3f142d87358525" - version = "v2.0.0" + revision = "68fc73b635f890fe7ba2f3b15ce80c85b28a744f" + version = "v2.0.2" [[projects]] name = "github.com/aws/aws-sdk-go" packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/sts"] - revision = "f62f7b7c5425f2b1a630932617477bdeac6dc371" - version = "v1.12.55" + revision = "fe3adbda9bc845e750e3e5767c0a14dff202b2cc" + version = "v1.12.62" [[projects]] branch = "master" @@ -101,7 +100,7 @@ branch = "master" name = "github.com/bifurcation/mint" packages = [".","syntax"] - revision = "f699e8d03646cb8e6e15410ced7bff37fcf8dddd" + revision = "350f685c15fb6b89af795dafe64fad68950948e0" [[projects]] name = "github.com/blang/semver" @@ -185,7 +184,7 @@ branch = "master" name = "github.com/hashicorp/go-getter" packages = [".","helper/url"] - revision = "994f50a6f071b07cfbea9eca9618c9674091ca51" + revision = "961f56d2e93379b7d9c578e998d09257509a6f97" [[projects]] branch = "master" @@ -203,7 +202,7 @@ branch = "master" name = "github.com/hashicorp/go-plugin" packages = ["."] - revision = "e2fbc6864d18d3c37b6cde4297ec9fca266d28f1" + revision = "e37881a3f1a07fce82b3d99ce0342a72e53386bc" [[projects]] branch = "master" @@ -233,7 +232,7 @@ branch = "master" name = "github.com/hashicorp/hcl2" packages = ["gohcl","hcl","hcl/hclsyntax","hcl/json","hcldec","hclparse"] - revision = "44bad6dbf5490f5da17ec991e664df3d017b706f" + revision = "883a81b4902ecdc60cd9d77eae4c228792827c13" [[projects]] branch = "master" @@ -243,9 +242,9 @@ [[projects]] name = "github.com/hashicorp/terraform" - packages = ["config","config/configschema","config/hcl2shim","config/module","dag","flatmap","helper/hashcode","helper/hilmapstructure","helper/schema","moduledeps","plugin","plugin/discovery","registry/regsrc","registry/response","svchost","svchost/auth","svchost/disco","terraform","tfdiags","version"] - revision = "a42fdb08a43c7fabb8898fe8c286b793bbaa4835" - version = "v0.11.1" + packages = ["config","config/configschema","config/hcl2shim","config/module","dag","flatmap","helper/hashcode","helper/hilmapstructure","helper/schema","moduledeps","plugin","plugin/discovery","registry","registry/regsrc","registry/response","svchost","svchost/auth","svchost/disco","terraform","tfdiags","version"] + revision = "a6008b8a48a749c7c167453b9cf55ffd572b9a5d" + version = "v0.11.2" [[projects]] branch = "master" @@ -256,7 +255,8 @@ [[projects]] name = "github.com/jmespath/go-jmespath" packages = ["."] - revision = "0b12d6b5" + revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9" + version = "0.2.2" [[projects]] name = "github.com/joho/godotenv" @@ -326,7 +326,7 @@ [[projects]] name = 
"github.com/magefile/mage" - packages = ["mg","types"] + packages = ["build","mage","mg","parse","parse/srcimporter","sh","types"] revision = "ab3ca2f6f85577d7ec82e0a6df721147a2e737f9" version = "v2.0.1" @@ -382,7 +382,7 @@ branch = "master" name = "github.com/mitchellh/mapstructure" packages = ["."] - revision = "06020f85339e21b2478f756a78e295255ffa4d6a" + revision = "b4575eea38cca1123ec2dc90c26529b5c5acfcff" [[projects]] branch = "master" @@ -406,7 +406,7 @@ branch = "master" name = "github.com/olekukonko/tablewriter" packages = ["."] - revision = "65fec0d89a572b4367094e2058d3ebe667de3b60" + revision = "96aac992fc8b1a4c83841a6c3e7178d20d989625" [[projects]] name = "github.com/pkg/errors" @@ -423,8 +423,8 @@ [[projects]] name = "github.com/satori/go.uuid" packages = ["."] - revision = "879c5887cd475cd7864858769793b2ceb0d44feb" - version = "v1.1.0" + revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3" + version = "v1.2.0" [[projects]] branch = "master" @@ -472,7 +472,7 @@ branch = "master" name = "github.com/zclconf/go-cty" packages = ["cty","cty/convert","cty/function","cty/function/stdlib","cty/gocty","cty/json","cty/set"] - revision = "48ce95f3a00f37ac934ff90a62e377146f9428e1" + revision = "709e4033eeb037dc543dbc2048065dfb814ce316" [[projects]] name = "go.uber.org/atomic" @@ -484,19 +484,19 @@ branch = "master" name = "golang.org/x/crypto" packages = ["acme","acme/autocert","bcrypt","blowfish","cast5","curve25519","hkdf","nacl/secretbox","openpgp","openpgp/armor","openpgp/elgamal","openpgp/errors","openpgp/packet","openpgp/s2k","pbkdf2","poly1305","salsa20","salsa20/salsa","tea","twofish","xtea"] - revision = "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8" + revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac" [[projects]] branch = "master" name = "golang.org/x/net" packages = ["bpf","context","html","html/atom","http2","http2/hpack","idna","internal/iana","internal/socket","internal/timeseries","ipv4","lex/httplex","trace"] - revision = "d866cfc389cec985d6fda2859936a575a55a3ab6" + revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec" [[projects]] branch = "master" name = "golang.org/x/sys" packages = ["unix"] - revision = "28a7276518d399b9634904daad79e18b44d481bc" + revision = "fff93fa7cd278d84afc205751523809c464168ab" [[projects]] branch = "master" @@ -513,18 +513,11 @@ [[projects]] name = "google.golang.org/grpc" packages = [".","balancer","balancer/base","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","health","health/grpc_health_v1","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"] - revision = "f3955b8e9e244dd4dd4bc4f7b7a23a8445400a76" - version = "v1.9.0" + revision = "7cea4cc846bcf00cbb27595b07da5de875ef7de9" + version = "v1.9.1" [[projects]] name = "gopkg.in/alecthomas/kingpin.v2" packages = ["."] revision = "947dcec5ba9c011838740e680966fd7087a71d0d" version = "v2.2.6" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "dd3b3341036bb95c8a409729fa12b897e6515c32cfaae8218cf27d60ad1a3b07" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index ecb03ac..e69de29 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -1,138 +0,0 @@ - -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. 
-# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" - - -[[constraint]] - branch = "master" - name = "github.com/Xe/gopreload" - -[[constraint]] - name = "github.com/Xe/ln" - version = "0.1.0" - -[[constraint]] - branch = "master" - name = "github.com/Xe/uuid" - -[[constraint]] - branch = "master" - name = "github.com/Xe/x" - -[[constraint]] - name = "github.com/asdine/storm" - version = "2.0.0" - -[[constraint]] - branch = "master" - name = "github.com/brandur/simplebox" - -[[constraint]] - name = "github.com/caarlos0/env" - version = "3.2.0" - -[[constraint]] - branch = "master" - name = "github.com/dgryski/go-failure" - -[[constraint]] - branch = "master" - name = "github.com/dickeyxxx/netrc" - -[[constraint]] - branch = "master" - name = "github.com/facebookgo/flagenv" - -[[constraint]] - branch = "master" - name = "github.com/golang/protobuf" - -[[constraint]] - name = "github.com/google/gops" - version = "0.3.2" - -[[constraint]] - name = "github.com/hashicorp/terraform" - version = "0.11.1" - -[[constraint]] - name = "github.com/joho/godotenv" - version = "1.2.0" - -[[constraint]] - branch = "master" - name = "github.com/jtolds/qod" - -[[constraint]] - branch = "master" - name = "github.com/kr/pretty" - -[[constraint]] - name = "github.com/lucas-clemente/quic-go" - version = "0.6.0" - -[[constraint]] - name = "github.com/magefile/mage" - version = "2.0.1" - -[[constraint]] - branch = "master" - name = "github.com/mtneug/pkg" - -[[constraint]] - branch = "master" - name = "github.com/olekukonko/tablewriter" - -[[constraint]] - name = "github.com/pkg/errors" - version = "0.8.0" - -[[constraint]] - branch = "master" - name = "github.com/streamrail/concurrent-map" - -[[constraint]] - name = "github.com/xtaci/kcp-go" - version = "3.23.0" - -[[constraint]] - name = "github.com/xtaci/smux" - version = "1.0.6" - -[[constraint]] - name = "go.uber.org/atomic" - version = "1.3.1" - -[[constraint]] - branch = "master" - name = "golang.org/x/crypto" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.9.0" - -[[constraint]] - name = "gopkg.in/alecthomas/kingpin.v2" - version = "2.2.6" diff --git a/cmd/mage/main.go b/cmd/mage/main.go new file mode 100644 index 0000000..1b47cd0 --- /dev/null +++ b/cmd/mage/main.go @@ -0,0 +1,9 @@ +package main + +import ( + "os" + + "github.com/magefile/mage/mage" +) + +func main() { os.Exit(mage.Main()) } diff --git a/vendor/github.com/asdine/storm/extract.go b/vendor/github.com/asdine/storm/extract.go index 57d0561..6108cd8 100644 --- a/vendor/github.com/asdine/storm/extract.go +++ b/vendor/github.com/asdine/storm/extract.go @@ -115,6 +115,7 @@ func extractField(value *reflect.Value, field *reflect.StructField, m *structCon switch tag { case "id": f.IsID = true + f.Index = tagUniqueIdx case tagUniqueIdx, tagIdx: f.Index = tag case tagInline: @@ -163,6 +164,7 @@ func extractField(value *reflect.Value, field *reflect.StructField, m *structCon if m.ID == nil && field.Name == "ID" { if f == nil { f = &fieldConfig{ + Index: tagUniqueIdx, Name: field.Name, IsZero: isZero(value), IsInteger: isInteger(value), diff --git 
a/vendor/github.com/asdine/storm/extract_test.go b/vendor/github.com/asdine/storm/extract_test.go index ed8b119..1f6dcb0 100644 --- a/vendor/github.com/asdine/storm/extract_test.go +++ b/vendor/github.com/asdine/storm/extract_test.go @@ -45,7 +45,7 @@ func TestExtractUniqueTags(t *testing.T) { require.False(t, infos.ID.IsZero) require.Equal(t, "ClassicUnique", infos.Name) require.Len(t, allByType(infos, "index"), 0) - require.Len(t, allByType(infos, "unique"), 4) + require.Len(t, allByType(infos, "unique"), 5) } func TestExtractIndexTags(t *testing.T) { @@ -58,7 +58,7 @@ func TestExtractIndexTags(t *testing.T) { require.False(t, infos.ID.IsZero) require.Equal(t, "ClassicIndex", infos.Name) require.Len(t, allByType(infos, "index"), 5) - require.Len(t, allByType(infos, "unique"), 0) + require.Len(t, allByType(infos, "unique"), 1) } func TestExtractInlineWithIndex(t *testing.T) { @@ -70,7 +70,7 @@ func TestExtractInlineWithIndex(t *testing.T) { require.NotNil(t, infos.ID) require.Equal(t, "ClassicInline", infos.Name) require.Len(t, allByType(infos, "index"), 3) - require.Len(t, allByType(infos, "unique"), 2) + require.Len(t, allByType(infos, "unique"), 3) } func TestExtractMultipleTags(t *testing.T) { @@ -90,7 +90,7 @@ func TestExtractMultipleTags(t *testing.T) { require.NotNil(t, infos.ID) require.Equal(t, "User", infos.Name) require.Len(t, allByType(infos, "index"), 2) - require.Len(t, allByType(infos, "unique"), 1) + require.Len(t, allByType(infos, "unique"), 2) require.True(t, infos.Fields["Age"].Increment) require.Equal(t, int64(1), infos.Fields["Age"].IncrementStart) diff --git a/vendor/github.com/asdine/storm/finder.go b/vendor/github.com/asdine/storm/finder.go index ba23dc6..91e2e90 100644 --- a/vendor/github.com/asdine/storm/finder.go +++ b/vendor/github.com/asdine/storm/finder.go @@ -9,8 +9,8 @@ import ( "github.com/coreos/bbolt" ) -// A finder can fetch types from BoltDB -type finder interface { +// A Finder can fetch types from BoltDB. 
+type Finder interface { // One returns one record by the specified index One(fieldName string, value interface{}, to interface{}) error diff --git a/vendor/github.com/asdine/storm/finder_test.go b/vendor/github.com/asdine/storm/finder_test.go index daec14a..c5a1c57 100644 --- a/vendor/github.com/asdine/storm/finder_test.go +++ b/vendor/github.com/asdine/storm/finder_test.go @@ -685,3 +685,20 @@ func TestPrefix(t *testing.T) { err = db.Prefix("Group", "group3", &users) require.Equal(t, ErrNotFound, err) } + +func TestPrefixWithID(t *testing.T) { + db, cleanup := createDB(t) + defer cleanup() + + type User struct { + ID string + } + + require.NoError(t, db.Save(&User{ID: "1"})) + require.NoError(t, db.Save(&User{ID: "10"})) + + var users []User + + require.NoError(t, db.Prefix("ID", "1", &users)) + require.Len(t, users, 2) +} diff --git a/vendor/github.com/asdine/storm/kv.go b/vendor/github.com/asdine/storm/kv.go index b12de6d..36d2b5e 100644 --- a/vendor/github.com/asdine/storm/kv.go +++ b/vendor/github.com/asdine/storm/kv.go @@ -6,8 +6,8 @@ import ( "github.com/coreos/bbolt" ) -// keyValueStore can store and fetch values by key -type keyValueStore interface { +// KeyValueStore can store and fetch values by key +type KeyValueStore interface { // Get a value from a bucket Get(bucketName string, key interface{}, to interface{}) error // Set a key/value pair into a bucket diff --git a/vendor/github.com/asdine/storm/node.go b/vendor/github.com/asdine/storm/node.go index 9e94cc6..cb61a25 100644 --- a/vendor/github.com/asdine/storm/node.go +++ b/vendor/github.com/asdine/storm/node.go @@ -7,10 +7,11 @@ import ( // A Node in Storm represents the API to a BoltDB bucket. type Node interface { - tx - typeStore - keyValueStore - bucketScanner + Tx + TypeStore + KeyValueStore + BucketScanner + // From returns a new Storm node with a new bucket root below the current. // All DB operations on the new node will be executed relative to this bucket. From(addend ...string) Node diff --git a/vendor/github.com/asdine/storm/scan.go b/vendor/github.com/asdine/storm/scan.go index 56fc9f6..81d1f16 100644 --- a/vendor/github.com/asdine/storm/scan.go +++ b/vendor/github.com/asdine/storm/scan.go @@ -6,8 +6,8 @@ import ( "github.com/coreos/bbolt" ) -// A bucketScanner scans a Node for a list of buckets -type bucketScanner interface { +// A BucketScanner scans a Node for a list of buckets +type BucketScanner interface { // PrefixScan scans the root buckets for keys matching the given prefix. PrefixScan(prefix string) []Node // PrefixScan scans the buckets in this node for keys matching the given prefix. diff --git a/vendor/github.com/asdine/storm/store.go b/vendor/github.com/asdine/storm/store.go index c7f61d8..9c90094 100644 --- a/vendor/github.com/asdine/storm/store.go +++ b/vendor/github.com/asdine/storm/store.go @@ -9,9 +9,9 @@ import ( "github.com/coreos/bbolt" ) -// typeStore stores user defined types in BoltDB -type typeStore interface { - finder +// TypeStore stores user defined types in BoltDB. +type TypeStore interface { + Finder // Init creates the indexes and buckets for a given structure Init(data interface{}) error diff --git a/vendor/github.com/asdine/storm/transaction.go b/vendor/github.com/asdine/storm/transaction.go index 14e9708..9034493 100644 --- a/vendor/github.com/asdine/storm/transaction.go +++ b/vendor/github.com/asdine/storm/transaction.go @@ -2,8 +2,8 @@ package storm import "github.com/coreos/bbolt" -// tx is a transaction -type tx interface { +// Tx is a transaction. 
+type Tx interface { // Commit writes all changes to disk. Commit() error diff --git a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md index 85d9c90..a141f57 100644 --- a/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md +++ b/vendor/github.com/aws/aws-sdk-go/CHANGELOG.md @@ -1,3 +1,65 @@ +Release v1.12.62 (2018-01-15) +=== + +### Service Client Updates +* `aws/endpoints`: Updated Regions and Endpoints metadata. +* `service/lambda`: Updates service API and documentation + * Support for creating Lambda Functions using 'dotnetcore2.0' and 'go1.x'. + +Release v1.12.61 (2018-01-12) +=== + +### Service Client Updates +* `service/glue`: Updates service API and documentation + * Support is added to generate ETL scripts in Scala which can now be run by AWS Glue ETL jobs. In addition, the trigger API now supports firing when any conditions are met (in addition to all conditions). Also, jobs can be triggered based on a "failed" or "stopped" job run (in addition to a "succeeded" job run). + +Release v1.12.60 (2018-01-11) +=== + +### Service Client Updates +* `service/elasticloadbalancing`: Updates service API and documentation +* `service/elasticloadbalancingv2`: Updates service API and documentation +* `service/rds`: Updates service API and documentation + * Read Replicas for Amazon RDS for MySQL, MariaDB, and PostgreSQL now support Multi-AZ deployments.Amazon RDS Read Replicas enable you to create one or more read-only copies of your database instance within the same AWS Region or in a different AWS Region. Updates made to the source database are asynchronously copied to the Read Replicas. In addition to providing scalability for read-heavy workloads, you can choose to promote a Read Replica to become standalone a DB instance when needed.Amazon RDS Multi-AZ Deployments provide enhanced availability for database instances within a single AWS Region. With Multi-AZ, your data is synchronously replicated to a standby in a different Availability Zone (AZ). In case of an infrastructure failure, Amazon RDS performs an automatic failover to the standby, minimizing disruption to your applications.You can now combine Read Replicas with Multi-AZ as part of a disaster recovery strategy for your production databases. A well-designed and tested plan is critical for maintaining business continuity after a disaster. Since Read Replicas can also be created in different regions than the source database, your Read Replica can be promoted to become the new production database in case of a regional disruption.You can also combine Read Replicas with Multi-AZ for your database engine upgrade process. You can create a Read Replica of your production database instance and upgrade it to a new database engine version. When the upgrade is complete, you can stop applications, promote the Read Replica to a standalone database instance and switch over your applications. Since the database instance is already a Multi-AZ deployment, no additional steps are needed.For more information, see the Amazon RDS User Guide. +* `service/ssm`: Updates service documentation + * Updates documentation for the HierarchyLevelLimitExceededException error. 
+ +Release v1.12.59 (2018-01-09) +=== + +### Service Client Updates +* `service/kms`: Updates service documentation + * Documentation updates for AWS KMS + +Release v1.12.58 (2018-01-09) +=== + +### Service Client Updates +* `service/ds`: Updates service API and documentation + * On October 24 we introduced AWS Directory Service for Microsoft Active Directory (Standard Edition), also known as AWS Microsoft AD (Standard Edition), which is a managed Microsoft Active Directory (AD) that is optimized for small and midsize businesses (SMBs). With this SDK release, you can now create an AWS Microsoft AD directory using API. This enables you to run typical SMB workloads using a cost-effective, highly available, and managed Microsoft AD in the AWS Cloud. + +Release v1.12.57 (2018-01-08) +=== + +### Service Client Updates +* `service/codedeploy`: Updates service API and documentation + * The AWS CodeDeploy API was updated to support DeleteGitHubAccountToken, a new method that deletes a GitHub account connection. +* `service/discovery`: Updates service API and documentation + * Documentation updates for AWS Application Discovery Service. +* `service/route53`: Updates service API and documentation + * This release adds an exception to the CreateTrafficPolicyVersion API operation. + +Release v1.12.56 (2018-01-05) +=== + +### Service Client Updates +* `service/inspector`: Updates service API, documentation, and examples + * Added 2 new attributes to the DescribeAssessmentTemplate response, indicating the total number of assessment runs and last assessment run ARN (if present.) +* `service/snowball`: Updates service documentation + * Documentation updates for snowball +* `service/ssm`: Updates service documentation + * Documentation updates for ssm + Release v1.12.55 (2018-01-02) === diff --git a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go index 56f08e3..709f648 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/endpoints/defaults.go @@ -740,6 +740,7 @@ var awsPartition = partition{ "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-2": endpoint{}, + "eu-west-3": endpoint{}, "sa-east-1": endpoint{}, "us-east-1": endpoint{}, "us-east-2": endpoint{}, @@ -1636,6 +1637,7 @@ var awsPartition = partition{ "ap-northeast-2": endpoint{}, "ap-south-1": endpoint{}, "ap-southeast-2": endpoint{}, + "ca-central-1": endpoint{}, "eu-central-1": endpoint{}, "eu-west-1": endpoint{}, "eu-west-3": endpoint{}, diff --git a/vendor/github.com/aws/aws-sdk-go/aws/version.go b/vendor/github.com/aws/aws-sdk-go/aws/version.go index a92ed43..6b7bd46 100644 --- a/vendor/github.com/aws/aws-sdk-go/aws/version.go +++ b/vendor/github.com/aws/aws-sdk-go/aws/version.go @@ -5,4 +5,4 @@ package aws const SDKName = "aws-sdk-go" // SDKVersion is the version of this SDK -const SDKVersion = "1.12.55" +const SDKVersion = "1.12.62" diff --git a/vendor/github.com/bifurcation/mint/client-state-machine.go b/vendor/github.com/bifurcation/mint/client-state-machine.go index b72c584..ddf9ee9 100644 --- a/vendor/github.com/bifurcation/mint/client-state-machine.go +++ b/vendor/github.com/bifurcation/mint/client-state-machine.go @@ -103,7 +103,8 @@ func (state ClientStateStart) Next(hr handshakeMessageReader) (HandshakeState, [ // Construct base ClientHello ch := &ClientHelloBody{ - CipherSuites: state.Caps.CipherSuites, + LegacyVersion: wireVersion(state.hsCtx.hIn), + CipherSuites: state.Caps.CipherSuites, 
} _, err := prng.Read(ch.Random[:]) if err != nil { diff --git a/vendor/github.com/bifurcation/mint/common.go b/vendor/github.com/bifurcation/mint/common.go index 3dc44f5..565d15e 100644 --- a/vendor/github.com/bifurcation/mint/common.go +++ b/vendor/github.com/bifurcation/mint/common.go @@ -6,9 +6,10 @@ import ( ) const ( - supportedVersion uint16 = 0x7f16 // draft-22 - tls12Version uint16 = 0x0303 - tls10Version uint16 = 0x0301 + supportedVersion uint16 = 0x7f16 // draft-22 + tls12Version uint16 = 0x0303 + tls10Version uint16 = 0x0301 + dtls12WireVersion uint16 = 0xfefd ) var ( diff --git a/vendor/github.com/bifurcation/mint/dtls.go b/vendor/github.com/bifurcation/mint/dtls.go index e7ed205..df4f1aa 100644 --- a/vendor/github.com/bifurcation/mint/dtls.go +++ b/vendor/github.com/bifurcation/mint/dtls.go @@ -1,7 +1,28 @@ package mint +import ( + "fmt" +) + // This file is a placeholder. DTLS-specific stuff (timer management, // ACKs, retransmits, etc. will eventually go here. const ( initialMtu = 1200 ) + +func wireVersion(h *HandshakeLayer) uint16 { + if h.datagram { + return dtls12WireVersion + } + return tls12Version +} + +func dtlsConvertVersion(version uint16) uint16 { + if version == tls12Version { + return dtls12WireVersion + } + if version == tls10Version { + return 0xfeff + } + panic(fmt.Sprintf("Internal error, unexpected version=%d", version)) +} diff --git a/vendor/github.com/bifurcation/mint/handshake-layer.go b/vendor/github.com/bifurcation/mint/handshake-layer.go index 48d086b..888c5f3 100644 --- a/vendor/github.com/bifurcation/mint/handshake-layer.go +++ b/vendor/github.com/bifurcation/mint/handshake-layer.go @@ -77,8 +77,6 @@ func (hm HandshakeMessage) ToBody() (HandshakeMessageBody, error) { body = new(ClientHelloBody) case HandshakeTypeServerHello: body = new(ServerHelloBody) - case HandshakeTypeHelloRetryRequest: - body = new(HelloRetryRequestBody) case HandshakeTypeEncryptedExtensions: body = new(EncryptedExtensionsBody) case HandshakeTypeCertificate: diff --git a/vendor/github.com/bifurcation/mint/handshake-messages.go b/vendor/github.com/bifurcation/mint/handshake-messages.go index b29faa3..5a229f1 100644 --- a/vendor/github.com/bifurcation/mint/handshake-messages.go +++ b/vendor/github.com/bifurcation/mint/handshake-messages.go @@ -25,14 +25,14 @@ type HandshakeMessageBody interface { // Extension extensions<0..2^16-1>; // } ClientHello; type ClientHelloBody struct { - // Omitted: clientVersion + LegacyVersion uint16 Random [32]byte LegacySessionID []byte CipherSuites []CipherSuite Extensions ExtensionList } -type clientHelloBodyInner struct { +type clientHelloBodyInnerTLS struct { LegacyVersion uint16 Random [32]byte LegacySessionID []byte `tls:"head=1,max=32"` @@ -41,41 +41,86 @@ type clientHelloBodyInner struct { Extensions []Extension `tls:"head=2"` } +type clientHelloBodyInnerDTLS struct { + LegacyVersion uint16 + Random [32]byte + LegacySessionID []byte `tls:"head=1,max=32"` + EmptyCookie uint8 + CipherSuites []CipherSuite `tls:"head=2,min=2"` + LegacyCompressionMethods []byte `tls:"head=1,min=1"` + Extensions []Extension `tls:"head=2"` +} + func (ch ClientHelloBody) Type() HandshakeType { return HandshakeTypeClientHello } func (ch ClientHelloBody) Marshal() ([]byte, error) { - return syntax.Marshal(clientHelloBodyInner{ - LegacyVersion: tls12Version, - Random: ch.Random, - LegacySessionID: []byte{}, - CipherSuites: ch.CipherSuites, - LegacyCompressionMethods: []byte{0}, - Extensions: ch.Extensions, - }) + if ch.LegacyVersion == tls12Version { + return 
syntax.Marshal(clientHelloBodyInnerTLS{ + LegacyVersion: ch.LegacyVersion, + Random: ch.Random, + LegacySessionID: []byte{}, + CipherSuites: ch.CipherSuites, + LegacyCompressionMethods: []byte{0}, + Extensions: ch.Extensions, + }) + } else { + return syntax.Marshal(clientHelloBodyInnerDTLS{ + LegacyVersion: ch.LegacyVersion, + Random: ch.Random, + LegacySessionID: []byte{}, + CipherSuites: ch.CipherSuites, + LegacyCompressionMethods: []byte{0}, + Extensions: ch.Extensions, + }) + } + } func (ch *ClientHelloBody) Unmarshal(data []byte) (int, error) { - var inner clientHelloBodyInner - read, err := syntax.Unmarshal(data, &inner) - if err != nil { - return 0, err - } + var read int + var err error - // We are strict about these things because we only support 1.3 - if inner.LegacyVersion != tls12Version { - return 0, fmt.Errorf("tls.clienthello: Incorrect version number") - } + // Note that this might be 0, in which case we do TLS. That + // makes the tests easier. + if ch.LegacyVersion != dtls12WireVersion { + var inner clientHelloBodyInnerTLS + read, err = syntax.Unmarshal(data, &inner) + if err != nil { + return 0, err + } - if len(inner.LegacyCompressionMethods) != 1 || inner.LegacyCompressionMethods[0] != 0 { - return 0, fmt.Errorf("tls.clienthello: Invalid compression method") - } + if len(inner.LegacyCompressionMethods) != 1 || inner.LegacyCompressionMethods[0] != 0 { + return 0, fmt.Errorf("tls.clienthello: Invalid compression method") + } - ch.Random = inner.Random - ch.LegacySessionID = inner.LegacySessionID - ch.CipherSuites = inner.CipherSuites - ch.Extensions = inner.Extensions + ch.LegacyVersion = inner.LegacyVersion + ch.Random = inner.Random + ch.LegacySessionID = inner.LegacySessionID + ch.CipherSuites = inner.CipherSuites + ch.Extensions = inner.Extensions + } else { + var inner clientHelloBodyInnerDTLS + read, err = syntax.Unmarshal(data, &inner) + if err != nil { + return 0, err + } + + if inner.EmptyCookie != 0 { + return 0, fmt.Errorf("tls.clienthello: Invalid cookie") + } + + if len(inner.LegacyCompressionMethods) != 1 || inner.LegacyCompressionMethods[0] != 0 { + return 0, fmt.Errorf("tls.clienthello: Invalid compression method") + } + + ch.LegacyVersion = inner.LegacyVersion + ch.Random = inner.Random + ch.LegacySessionID = inner.LegacySessionID + ch.CipherSuites = inner.CipherSuites + ch.Extensions = inner.Extensions + } return read, nil } @@ -120,29 +165,6 @@ func (ch ClientHelloBody) Truncated() ([]byte, error) { return chData[:chLen-binderLen], nil } -// struct { -// ProtocolVersion server_version; -// CipherSuite cipher_suite; -// Extension extensions<2..2^16-1>; -// } HelloRetryRequest; -type HelloRetryRequestBody struct { - Version uint16 - CipherSuite CipherSuite - Extensions ExtensionList `tls:"head=2,min=2"` -} - -func (hrr HelloRetryRequestBody) Type() HandshakeType { - return HandshakeTypeHelloRetryRequest -} - -func (hrr HelloRetryRequestBody) Marshal() ([]byte, error) { - return syntax.Marshal(hrr) -} - -func (hrr *HelloRetryRequestBody) Unmarshal(data []byte) (int, error) { - return syntax.Unmarshal(data, hrr) -} - // struct { // ProtocolVersion legacy_version = 0x0303; /* TLS v1.2 */ // Random random; diff --git a/vendor/github.com/bifurcation/mint/handshake-messages_test.go b/vendor/github.com/bifurcation/mint/handshake-messages_test.go index a298306..92e9cac 100644 --- a/vendor/github.com/bifurcation/mint/handshake-messages_test.go +++ b/vendor/github.com/bifurcation/mint/handshake-messages_test.go @@ -34,6 +34,7 @@ var ( 0x30, 0x31, 0x32, 0x33, 0x34, 
0x35, 0x36, 0x37} chCipherSuites = []CipherSuite{0x0001, 0x0002, 0x0003} chValidIn = ClientHelloBody{ + LegacyVersion: tls12Version, Random: helloRandom, CipherSuites: chCipherSuites, Extensions: extListValidIn, @@ -49,8 +50,9 @@ var ( chTruncHex = "01000062" + "0303" + hex.EncodeToString(helloRandom[:]) + "00" + "0006000100020003" + "0100" + "00330029002f000a00040102030405060708" chTruncValid = ClientHelloBody{ - Random: helloRandom, - CipherSuites: chCipherSuites, + LegacyVersion: tls12Version, + Random: helloRandom, + CipherSuites: chCipherSuites, Extensions: []Extension{ { ExtensionType: ExtensionTypePreSharedKey, @@ -60,35 +62,28 @@ var ( } chTruncInvalid = ClientHelloBody{} chTruncNoExt = ClientHelloBody{ - Random: helloRandom, - CipherSuites: chCipherSuites, - Extensions: []Extension{}, + LegacyVersion: tls12Version, + Random: helloRandom, + CipherSuites: chCipherSuites, + Extensions: []Extension{}, } chTruncNoPSK = ClientHelloBody{ - Random: helloRandom, - CipherSuites: chCipherSuites, + LegacyVersion: tls12Version, + Random: helloRandom, + CipherSuites: chCipherSuites, Extensions: []Extension{ {ExtensionType: ExtensionTypeEarlyData}, }, } chTruncBadPSK = ClientHelloBody{ - Random: helloRandom, - CipherSuites: chCipherSuites, + LegacyVersion: tls12Version, + Random: helloRandom, + CipherSuites: chCipherSuites, Extensions: []Extension{ {ExtensionType: ExtensionTypePreSharedKey}, }, } - // HelloRetryRequest test cases - hrrValidIn = HelloRetryRequestBody{ - Version: supportedVersion, - CipherSuite: 0x0001, - Extensions: extListValidIn, - } - hrrEmptyIn = HelloRetryRequestBody{} - hrrValidHex = supportedVersionHex + "0001" + extListValidHex - hrrEmptyHex = supportedVersionHex + "0001" + "0000" - // ServerHello test cases shValidIn = ServerHelloBody{ Version: tls12Version, @@ -290,12 +285,6 @@ func TestClientHelloMarshalUnmarshal(t *testing.T) { _, err = ch.Unmarshal(chValid[:fixedClientHelloBodyLen-1]) assertError(t, err, "Unmarshaled a ClientHello below the min length") - // Test unmarshal failure on wrong version - chValid[1]-- - _, err = ch.Unmarshal(chValid) - assertError(t, err, "Unmarshaled a ClientHello with the wrong version") - chValid[1]++ - // Test unmarshal failure on ciphersuite size overflow chValid[35] = 0xFF _, err = ch.Unmarshal(chValid) @@ -352,34 +341,6 @@ func TestClientHelloTruncate(t *testing.T) { assertError(t, err, "Truncated a ClientHello with a mal-formed PSK") } -func TestHelloRetryRequestMarshalUnmarshal(t *testing.T) { - hrrValid := unhex(hrrValidHex) - hrrEmpty := unhex(hrrEmptyHex) - - // Test correctness of handshake type - assertEquals(t, (HelloRetryRequestBody{}).Type(), HandshakeTypeHelloRetryRequest) - - // Test successful marshal - out, err := hrrValidIn.Marshal() - assertNotError(t, err, "Failed to marshal a valid HelloRetryRequest") - assertByteEquals(t, out, hrrValid) - - // Test marshal failure with no extensions present - out, err = hrrEmptyIn.Marshal() - assertError(t, err, "Marshaled HelloRetryRequest with no extensions") - - // Test successful unmarshal - var hrr HelloRetryRequestBody - read, err := hrr.Unmarshal(hrrValid) - assertNotError(t, err, "Failed to unmarshal a valid HelloRetryRequest") - assertEquals(t, read, len(hrrValid)) - assertDeepEquals(t, hrr, hrrValidIn) - - // Test unmarshal failure with no extensions present - read, err = hrr.Unmarshal(hrrEmpty) - assertError(t, err, "Unmarshaled a HelloRetryRequest with no extensions") -} - func TestServerHelloMarshalUnmarshal(t *testing.T) { shValid := unhex(shValidHex) shEmpty := 
unhex(shEmptyHex) diff --git a/vendor/github.com/bifurcation/mint/record-layer.go b/vendor/github.com/bifurcation/mint/record-layer.go index ee9fe43..761a868 100644 --- a/vendor/github.com/bifurcation/mint/record-layer.go +++ b/vendor/github.com/bifurcation/mint/record-layer.go @@ -119,6 +119,15 @@ func (r *RecordLayer) Rekey(epoch Epoch, factory aeadFactory, key []byte, iv []b return nil } +func (c *cipherState) formatSeq(datagram bool) []byte { + seq := append([]byte{}, c.seq...) + if datagram { + seq[0] = byte(c.epoch >> 8) + seq[1] = byte(c.epoch & 0xff) + } + return seq +} + func (c *cipherState) computeNonce(seq []byte) []byte { nonce := make([]byte, len(c.iv)) copy(nonce, c.iv) @@ -143,9 +152,9 @@ func (c *cipherState) incrementSequenceNumber() { if i < 0 { // Not allowed to let sequence number wrap. // Instead, must renegotiate before it does. - // Not likely enough to bothec. + // Not likely enough to bother. // TODO(ekr@rtfm.com): Check for DTLS here - // because the limit is soonec. + // because the limit is sooner. panic("TLS: sequence number wraparound") } } @@ -157,7 +166,8 @@ func (c *cipherState) overhead() int { return c.cipher.Overhead() } -func (r *RecordLayer) encrypt(cipher *cipherState, pt *TLSPlaintext, padLen int) *TLSPlaintext { +func (r *RecordLayer) encrypt(cipher *cipherState, seq []byte, pt *TLSPlaintext, padLen int) *TLSPlaintext { + logf(logTypeIO, "Encrypt seq=[%x]", seq) // Expand the fragment to hold contentType, padding, and overhead originalLen := len(pt.fragment) plaintextLen := originalLen + 1 + padLen @@ -165,6 +175,7 @@ func (r *RecordLayer) encrypt(cipher *cipherState, pt *TLSPlaintext, padLen int) // Assemble the revised plaintext out := &TLSPlaintext{ + contentType: RecordTypeApplicationData, fragment: make([]byte, ciphertextLen), } @@ -176,11 +187,12 @@ func (r *RecordLayer) encrypt(cipher *cipherState, pt *TLSPlaintext, padLen int) // Encrypt the fragment payload := out.fragment[:plaintextLen] - cipher.cipher.Seal(payload[:0], cipher.computeNonce(cipher.seq), payload, nil) + cipher.cipher.Seal(payload[:0], cipher.computeNonce(seq), payload, nil) return out } func (r *RecordLayer) decrypt(pt *TLSPlaintext, seq []byte) (*TLSPlaintext, int, error) { + logf(logTypeIO, "Decrypt seq=[%x]", seq) if len(pt.fragment) < r.cipher.overhead() { msg := fmt.Sprintf("tls.record.decrypt: Record too short [%d] < [%d]", len(pt.fragment), r.cipher.overhead()) return nil, 0, DecryptError(msg) @@ -312,6 +324,8 @@ func (r *RecordLayer) nextRecord() (*TLSPlaintext, error) { if r.datagram { seq = header[3:11] } + // TODO(ekr@rtfm.com): Handle the wrong epoch. + // TODO(ekr@rtfm.com): Handle duplicates. 
logf(logTypeIO, "RecordLayer.ReadRecord epoch=[%s] seq=[%x] [%d] ciphertext=[%x]", cipher.epoch.label(), seq, pt.contentType, pt.fragment) pt, _, err = r.decrypt(pt, seq) if err != nil { @@ -341,9 +355,11 @@ func (r *RecordLayer) WriteRecordWithPadding(pt *TLSPlaintext, padLen int) error } func (r *RecordLayer) writeRecordWithPadding(pt *TLSPlaintext, cipher *cipherState, padLen int) error { + seq := cipher.formatSeq(r.datagram) + if cipher.cipher != nil { logf(logTypeIO, "RecordLayer.WriteRecord epoch=[%s] seq=[%x] [%d] plaintext=[%x]", cipher.epoch.label(), cipher.seq, pt.contentType, pt.fragment) - pt = r.encrypt(cipher, pt, padLen) + pt = r.encrypt(cipher, seq, pt, padLen) } else if padLen > 0 { return fmt.Errorf("tls.record: Padding can only be done on encrypted records") } @@ -354,16 +370,17 @@ func (r *RecordLayer) writeRecordWithPadding(pt *TLSPlaintext, cipher *cipherSta length := len(pt.fragment) var header []byte + if !r.datagram { header = []byte{byte(pt.contentType), byte(r.version >> 8), byte(r.version & 0xff), byte(length >> 8), byte(length)} } else { - // TODO(ekr@rtfm.com): Double check version - seq := cipher.seq - header = []byte{byte(pt.contentType), 0xfe, 0xff, - 0x00, 0x00, // TODO(ekr@rtfm.com): double-check epoch - seq[2], seq[3], seq[4], seq[5], seq[6], seq[7], + version := dtlsConvertVersion(r.version) + header = []byte{byte(pt.contentType), + byte(version >> 8), byte(version & 0xff), + seq[0], seq[1], seq[2], seq[3], + seq[4], seq[5], seq[6], seq[7], byte(length >> 8), byte(length)} } record := append(header, pt.fragment...) diff --git a/vendor/github.com/bifurcation/mint/record-layer_test.go b/vendor/github.com/bifurcation/mint/record-layer_test.go index 6d9278f..6d1ed1e 100644 --- a/vendor/github.com/bifurcation/mint/record-layer_test.go +++ b/vendor/github.com/bifurcation/mint/record-layer_test.go @@ -256,7 +256,9 @@ func TestReadWriteDTLS(t *testing.T) { b := bytes.NewBuffer(nil) out := NewRecordLayerDTLS(b) + out.SetVersion(tls12Version) in := NewRecordLayerDTLS(b) + in.SetVersion(tls12Version) // Unencrypted ptIn := &TLSPlaintext{ diff --git a/vendor/github.com/bifurcation/mint/server-state-machine.go b/vendor/github.com/bifurcation/mint/server-state-machine.go index a57cdfc..6c2d7b3 100644 --- a/vendor/github.com/bifurcation/mint/server-state-machine.go +++ b/vendor/github.com/bifurcation/mint/server-state-machine.go @@ -92,12 +92,18 @@ func (state ServerStateStart) Next(hr handshakeMessageReader) (HandshakeState, [ return nil, nil, AlertUnexpectedMessage } - ch := &ClientHelloBody{} + ch := &ClientHelloBody{LegacyVersion: wireVersion(state.hsCtx.hIn)} if err := safeUnmarshal(ch, hm.body); err != nil { logf(logTypeHandshake, "[ServerStateStart] Error decoding message: %v", err) return nil, nil, AlertDecodeError } + // We are strict about these things because we only support 1.3 + if ch.LegacyVersion != wireVersion(state.hsCtx.hIn) { + logf(logTypeHandshake, "[ServerStateStart] Invalid version number: %v", ch.LegacyVersion) + return nil, nil, AlertDecodeError + } + clientHello := hm connParams := ConnectionParameters{} diff --git a/vendor/github.com/bifurcation/mint/tls.go b/vendor/github.com/bifurcation/mint/tls.go index 8deafd1..4d22869 100644 --- a/vendor/github.com/bifurcation/mint/tls.go +++ b/vendor/github.com/bifurcation/mint/tls.go @@ -93,6 +93,7 @@ func DialWithDialer(dialer *net.Dialer, network, addr string, config *Config) (* if config != nil && config.NonBlocking { return nil, errors.New("dialing not possible in non-blocking mode") } + // We 
want the Timeout and Deadline values from dialer to cover the // whole process: TCP connection and TLS handshake. This means that we // also need to start our own timers now. @@ -127,16 +128,20 @@ func DialWithDialer(dialer *net.Dialer, network, addr string, config *Config) (* if config == nil { config = &Config{} + } else { + config = config.Clone() } + // If no ServerName is set, infer the ServerName // from the hostname we're connecting to. if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := config.Clone() - c.ServerName = hostname - config = c + config.ServerName = hostname + } + // Set up DTLS as needed. + config.UseDTLS = (network == "udp") + conn := Client(rawConn, config) if timeout == 0 { diff --git a/vendor/github.com/hashicorp/go-getter/README.md b/vendor/github.com/hashicorp/go-getter/README.md index a35d55b..40ace74 100644 --- a/vendor/github.com/hashicorp/go-getter/README.md +++ b/vendor/github.com/hashicorp/go-getter/README.md @@ -285,7 +285,7 @@ be used automatically. * `aws_access_key_id` (required) - Minio access key. * `aws_access_key_secret` (required) - Minio access key secret. * `region` (optional - defaults to us-east-1) - Region identifier to use. - * `version` (optional - fefaults to Minio default) - Configuration file format. + * `version` (optional - defaults to Minio default) - Configuration file format. #### S3 Bucket Examples diff --git a/vendor/github.com/hashicorp/go-plugin/server.go b/vendor/github.com/hashicorp/go-plugin/server.go index e154321..1e808b9 100644 --- a/vendor/github.com/hashicorp/go-plugin/server.go +++ b/vendor/github.com/hashicorp/go-plugin/server.go @@ -66,6 +66,10 @@ type ServeConfig struct { // the gRPC health checking service. This is not optional since go-plugin // relies on this to implement Ping(). GRPCServer func([]grpc.ServerOption) *grpc.Server + + // Logger is used to pass a logger into the server. If none is provided the + // server will create a default logger. + Logger hclog.Logger } // Protocol returns the protocol that this server should speak. @@ -106,12 +110,15 @@ func Serve(opts *ServeConfig) { // Logging goes to the original stderr log.SetOutput(os.Stderr) - // internal logger to os.Stderr - logger := hclog.New(&hclog.LoggerOptions{ - Level: hclog.Trace, - Output: os.Stderr, - JSONFormat: true, - }) + logger := opts.Logger + if logger == nil { + // internal logger to os.Stderr + logger = hclog.New(&hclog.LoggerOptions{ + Level: hclog.Trace, + Output: os.Stderr, + JSONFormat: true, + }) + } // Create our new stdout, stderr files. These will override our built-in // stdout/stderr so that it works across the stream boundary. 
diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go index 9776f04..dfa473a 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text.go @@ -2,7 +2,6 @@ package hcl import ( "bufio" - "bytes" "errors" "fmt" "io" @@ -43,7 +42,7 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { return errors.New("nil diagnostic") } - var colorCode, resetCode string + var colorCode, highlightCode, resetCode string if w.color { switch diag.Severity { case DiagError: @@ -52,6 +51,7 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { colorCode = "\x1b[33m" } resetCode = "\x1b[0m" + highlightCode = "\x1b[1;4m" } var severityStr string @@ -68,24 +68,31 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary) if diag.Subject != nil { + snipRange := *diag.Subject + highlightRange := snipRange + if diag.Context != nil { + // Show enough of the source code to include both the subject + // and context ranges, which overlap in all reasonable + // situations. + snipRange = RangeOver(snipRange, *diag.Context) + } + // We can't illustrate an empty range, so we'll turn such ranges into + // single-character ranges, which might not be totally valid (may point + // off the end of a line, or off the end of the file) but are good + // enough for the bounds checks we do below. + if snipRange.Empty() { + snipRange.End.Byte++ + snipRange.End.Column++ + } + if highlightRange.Empty() { + highlightRange.End.Byte++ + highlightRange.End.Column++ + } file := w.files[diag.Subject.Filename] if file == nil || file.Bytes == nil { fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line) } else { - src := file.Bytes - r := bytes.NewReader(src) - sc := bufio.NewScanner(r) - sc.Split(bufio.ScanLines) - - var startLine, endLine int - if diag.Context != nil { - startLine = diag.Context.Start.Line - endLine = diag.Context.End.Line - } else { - startLine = diag.Subject.Start.Line - endLine = diag.Subject.End.Line - } var contextLine string if diag.Subject != nil { @@ -95,35 +102,33 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error { } } - li := 1 - var ls string - for sc.Scan() { - ls = sc.Text() - - if li == startLine { - break - } - li++ - } - fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine) - // TODO: Generate markers for the specific characters that are in the Context and Subject ranges. - // For now, we just print out the lines. 
+ src := file.Bytes + sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines) - fmt.Fprintf(w.wr, "%4d: %s\n", li, ls) - - if endLine > li { - for sc.Scan() { - ls = sc.Text() - li++ - - fmt.Fprintf(w.wr, "%4d: %s\n", li, ls) - - if li == endLine { - break - } + for sc.Scan() { + lineRange := sc.Range() + if !lineRange.Overlaps(snipRange) { + continue } + + beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange) + if highlightedRange.Empty() { + fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes()) + } else { + before := beforeRange.SliceBytes(src) + highlighted := highlightedRange.SliceBytes(src) + after := afterRange.SliceBytes(src) + fmt.Fprintf( + w.wr, "%4d: %s%s%s%s%s\n", + lineRange.Start.Line, + before, + highlightCode, highlighted, resetCode, + after, + ) + } + } w.wr.Write([]byte{'\n'}) diff --git a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text_test.go b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text_test.go index 40c03df..67c1e64 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text_test.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/diagnostic_text_test.go @@ -45,10 +45,12 @@ All splines must be pre-reticulated. Detail: `"baz" is not a supported top-level attribute. Did you mean "bam"?`, Subject: &Range{ Start: Pos{ + Byte: 16, Column: 1, Line: 3, }, End: Pos{ + Byte: 19, Column: 4, Line: 3, }, @@ -71,10 +73,12 @@ attribute. Did you mean "bam"? Detail: `"pizza" is not a supported attribute. Did you mean "pizzetta"?`, Subject: &Range{ Start: Pos{ + Byte: 42, Column: 3, Line: 5, }, End: Pos{ + Byte: 47, Column: 8, Line: 5, }, @@ -83,10 +87,12 @@ attribute. Did you mean "bam"? // whether we're able to show a multi-line context when needed. Context: &Range{ Start: Pos{ + Byte: 24, Column: 1, Line: 4, }, End: Pos{ + Byte: 60, Column: 2, Line: 6, }, diff --git a/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go new file mode 100644 index 0000000..b06b197 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/expr_list.go @@ -0,0 +1,30 @@ +package hcl + +// ExprList tests if the given expression is a static list construct and, +// if so, extracts the expressions that represent the list elements. +// If the given expression is not a static list, error diagnostics are +// returned. +// +// A particular Expression implementation can support this function by +// offering a method called ExprList that takes no arguments and returns +// []Expression. This method should return nil if a static list cannot +// be extracted. +func ExprList(expr Expression) ([]Expression, Diagnostics) { + type exprList interface { + ExprList() []Expression + } + + if exL, supported := expr.(exprList); supported { + if list := exL.ExprList(); list != nil { + return list, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static list expression is required.", + Subject: expr.StartRange().Ptr(), + }, + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go index e90ac2b..58b6b15 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression.go @@ -70,6 +70,11 @@ func (e *ScopeTraversalExpr) StartRange() hcl.Range { return e.SrcRange } +// Implementation for hcl.AbsTraversalForExpr. 
+func (e *ScopeTraversalExpr) AsTraversal() hcl.Traversal { + return e.Traversal +} + // RelativeTraversalExpr is an Expression that retrieves a value from another // value using a _relative_ traversal. type RelativeTraversalExpr struct { @@ -539,6 +544,15 @@ func (e *TupleConsExpr) StartRange() hcl.Range { return e.OpenRange } +// Implementation for hcl.ExprList +func (e *TupleConsExpr) ExprList() []hcl.Expression { + ret := make([]hcl.Expression, len(e.Exprs)) + for i, expr := range e.Exprs { + ret[i] = expr + } + return ret +} + type ObjectConsExpr struct { Items []ObjectConsItem diff --git a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_test.go b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_test.go index f290adf..1b0030d 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_test.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/hclsyntax/expression_test.go @@ -1087,3 +1087,35 @@ func TestFunctionCallExprValue(t *testing.T) { }) } } + +func TestExpressionAsTraversal(t *testing.T) { + expr, _ := ParseExpression([]byte("a.b[0]"), "", hcl.Pos{}) + traversal, diags := hcl.AbsTraversalForExpr(expr) + if len(diags) != 0 { + t.Fatalf("unexpected diagnostics") + } + if len(traversal) != 3 { + t.Fatalf("wrong traversal %#v; want length 3", traversal) + } + if traversal.RootName() != "a" { + t.Fatalf("wrong root name %q; want %q", traversal.RootName(), "a") + } +} + +func TestStaticExpressionList(t *testing.T) { + expr, _ := ParseExpression([]byte("[0, a, true]"), "", hcl.Pos{}) + exprs, diags := hcl.ExprList(expr) + if len(diags) != 0 { + t.Fatalf("unexpected diagnostics") + } + if len(exprs) != 3 { + t.Fatalf("wrong result %#v; want length 3", exprs) + } + first, ok := exprs[0].(*LiteralValueExpr) + if !ok { + t.Fatalf("first expr has wrong type %T; want *zclsyntax.LiteralValueExpr", exprs[0]) + } + if !first.Val.RawEquals(cty.Zero) { + t.Fatalf("wrong first value %#v; want cty.Zero", first.Val) + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go index d13607e..86716e7 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/structure.go @@ -3,8 +3,8 @@ package json import ( "fmt" - "github.com/hashicorp/hcl2/hcl/hclsyntax" "github.com/hashicorp/hcl2/hcl" + "github.com/hashicorp/hcl2/hcl/hclsyntax" "github.com/zclconf/go-cty/cty" ) @@ -330,8 +330,26 @@ func (e *expression) Variables() []hcl.Traversal { switch v := e.src.(type) { case *stringVal: - // FIXME: Once the native zcl template language parser is implemented, - // parse with that and look for variables in there too, + templateSrc := v.Value + expr, diags := hclsyntax.ParseTemplate( + []byte(templateSrc), + v.SrcRange.Filename, + + // This won't produce _exactly_ the right result, since + // the zclsyntax parser can't "see" any escapes we removed + // while parsing JSON, but it's better than nothing. + hcl.Pos{ + Line: v.SrcRange.Start.Line, + + // skip over the opening quote mark + Byte: v.SrcRange.Start.Byte + 1, + Column: v.SrcRange.Start.Column + 1, + }, + ) + if diags.HasErrors() { + return vars + } + return expr.Variables() case *arrayVal: for _, jsonVal := range v.Values { @@ -353,3 +371,34 @@ func (e *expression) Range() hcl.Range { func (e *expression) StartRange() hcl.Range { return e.src.StartRange() } + +// Implementation for hcl.AbsTraversalForExpr. 
+func (e *expression) AsTraversal() hcl.Traversal { + // In JSON-based syntax a traversal is given as a string containing + // traversal syntax as defined by hclsyntax.ParseTraversalAbs. + + switch v := e.src.(type) { + case *stringVal: + traversal, diags := hclsyntax.ParseTraversalAbs([]byte(v.Value), v.SrcRange.Filename, v.SrcRange.Start) + if diags.HasErrors() { + return nil + } + return traversal + default: + return nil + } +} + +// Implementation for hcl.ExprList. +func (e *expression) ExprList() []hcl.Expression { + switch v := e.src.(type) { + case *arrayVal: + ret := make([]hcl.Expression, len(v.Values)) + for i, node := range v.Values { + ret[i] = &expression{src: node} + } + return ret + default: + return nil + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/json/structure_test.go b/vendor/github.com/hashicorp/hcl2/hcl/json/structure_test.go index 9caed58..7600e20 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/json/structure_test.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/json/structure_test.go @@ -741,3 +741,34 @@ func TestJustAttributes(t *testing.T) { }) } } + +func TestExpressionAsTraversal(t *testing.T) { + e := &expression{ + src: &stringVal{ + Value: "foo.bar[0]", + }, + } + traversal := e.AsTraversal() + if len(traversal) != 3 { + t.Fatalf("incorrect traversal %#v; want length 3", traversal) + } +} + +func TestStaticExpressionList(t *testing.T) { + e := &expression{ + src: &arrayVal{ + Values: []node{ + &stringVal{ + Value: "hello", + }, + }, + }, + } + exprs := e.ExprList() + if len(exprs) != 1 { + t.Fatalf("incorrect exprs %#v; want length 1", exprs) + } + if exprs[0].(*expression).src != e.src.(*arrayVal).Values[0] { + t.Fatalf("wrong first expression node") + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos.go b/vendor/github.com/hashicorp/hcl2/hcl/pos.go index 3ccdfac..1a4b329 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/pos.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/pos.go @@ -60,6 +60,40 @@ func RangeBetween(start, end Range) Range { } } +// RangeOver returns a new range that covers both of the given ranges and +// possibly additional content between them if the two ranges do not overlap. +// +// If either range is empty then it is ignored. The result is empty if both +// given ranges are empty. +// +// The result is meaningless if the two ranges to not belong to the same +// source file. +func RangeOver(a, b Range) Range { + if a.Empty() { + return b + } + if b.Empty() { + return a + } + + var start, end Pos + if a.Start.Byte < b.Start.Byte { + start = a.Start + } else { + start = b.Start + } + if a.End.Byte > b.End.Byte { + end = a.End + } else { + end = b.End + } + return Range{ + Filename: a.Filename, + Start: start, + End: end, + } +} + // ContainsOffset returns true if and only if the given byte offset is within // the receiving Range. func (r Range) ContainsOffset(offset int) bool { @@ -94,3 +128,135 @@ func (r Range) String() string { ) } } + +func (r Range) Empty() bool { + return r.Start.Byte == r.End.Byte +} + +// CanSliceBytes returns true if SliceBytes could return an accurate +// sub-slice of the given slice. +// +// This effectively tests whether the start and end offsets of the range +// are within the bounds of the slice, and thus whether SliceBytes can be +// trusted to produce an accurate start and end position within that slice. 
+func (r Range) CanSliceBytes(b []byte) bool { + switch { + case r.Start.Byte < 0 || r.Start.Byte > len(b): + return false + case r.End.Byte < 0 || r.End.Byte > len(b): + return false + case r.End.Byte < r.Start.Byte: + return false + default: + return true + } +} + +// SliceBytes returns a sub-slice of the given slice that is covered by the +// receiving range, assuming that the given slice is the source code of the +// file indicated by r.Filename. +// +// If the receiver refers to any byte offsets that are outside of the slice +// then the result is constrained to the overlapping portion only, to avoid +// a panic. Use CanSliceBytes to determine if the result is guaranteed to +// be an accurate span of the requested range. +func (r Range) SliceBytes(b []byte) []byte { + start := r.Start.Byte + end := r.End.Byte + if start < 0 { + start = 0 + } else if start > len(b) { + start = len(b) + } + if end < 0 { + end = 0 + } else if end > len(b) { + end = len(b) + } + if end < start { + end = start + } + return b[start:end] +} + +// Overlaps returns true if the receiver and the other given range share any +// characters in common. +func (r Range) Overlaps(other Range) bool { + switch { + case r.Filename != other.Filename: + // If the ranges are in different files then they can't possibly overlap + return false + case r.Empty() || other.Empty(): + // Empty ranges can never overlap + return false + case r.ContainsOffset(other.Start.Byte) || r.ContainsOffset(other.End.Byte): + return true + case other.ContainsOffset(r.Start.Byte) || other.ContainsOffset(r.End.Byte): + return true + default: + return false + } +} + +// Overlap finds a range that is either identical to or a sub-range of both +// the receiver and the other given range. It returns an empty range +// within the receiver if there is no overlap between the two ranges. +// +// A non-empty result is either identical to or a subset of the receiver. +func (r Range) Overlap(other Range) Range { + if !r.Overlaps(other) { + // Start == End indicates an empty range + return Range{ + Filename: r.Filename, + Start: r.Start, + End: r.Start, + } + } + + var start, end Pos + if r.Start.Byte > other.Start.Byte { + start = r.Start + } else { + start = other.Start + } + if r.End.Byte < other.End.Byte { + end = r.End + } else { + end = other.End + } + + return Range{ + Filename: r.Filename, + Start: start, + End: end, + } +} + +// PartitionAround finds the portion of the given range that overlaps with +// the reciever and returns three ranges: the portion of the reciever that +// precedes the overlap, the overlap itself, and then the portion of the +// reciever that comes after the overlap. +// +// If the two ranges do not overlap then all three returned ranges are empty. +// +// If the given range aligns with or extends beyond either extent of the +// reciever then the corresponding outer range will be empty. 
+func (r Range) PartitionAround(other Range) (before, overlap, after Range) { + overlap = r.Overlap(other) + if overlap.Empty() { + return overlap, overlap, overlap + } + + before = Range{ + Filename: r.Filename, + Start: r.Start, + End: overlap.Start, + } + after = Range{ + Filename: r.Filename, + Start: overlap.End, + End: r.End, + } + + return before, overlap, after +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go new file mode 100644 index 0000000..7c8f2df --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go @@ -0,0 +1,148 @@ +package hcl + +import ( + "bufio" + "bytes" + + "github.com/apparentlymart/go-textseg/textseg" +) + +// RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc +// and visit a source range for each token matched. +// +// For example, this can be used with bufio.ScanLines to find the source range +// for each line in the file, skipping over the actual newline characters, which +// may be useful when printing source code snippets as part of diagnostic +// messages. +// +// The line and column information in the returned ranges is produced by +// counting newline characters and grapheme clusters respectively, which +// mimics the behavior we expect from a parser when producing ranges. +type RangeScanner struct { + filename string + b []byte + cb bufio.SplitFunc + + pos Pos // position of next byte to process in b + cur Range // latest range + tok []byte // slice of b that is covered by cur + err error // error from last scan, if any +} + +// Create a new RangeScanner for the given buffer, producing ranges for the +// given filename. +// +// Since ranges have grapheme-cluster granularity rather than byte granularity, +// the scanner will produce incorrect results if the given SplitFunc creates +// tokens between grapheme cluster boundaries. In particular, it is incorrect +// to use RangeScanner with bufio.ScanRunes because it will produce tokens +// around individual UTF-8 sequences, which will split any multi-sequence +// grapheme clusters. +func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner { + return &RangeScanner{ + filename: filename, + b: b, + cb: cb, + pos: Pos{ + Byte: 0, + Line: 1, + Column: 1, + }, + } +} + +func (sc *RangeScanner) Scan() bool { + if sc.pos.Byte >= len(sc.b) || sc.err != nil { + // All done + return false + } + + // Since we're operating on an in-memory buffer, we always pass the whole + // remainder of the buffer to our SplitFunc and set isEOF to let it know + // that it has the whole thing. + advance, token, err := sc.cb(sc.b[sc.pos.Byte:], true) + + // Since we are setting isEOF to true this should never happen, but + // if it does we will just abort and assume the SplitFunc is misbehaving. + if advance == 0 && token == nil && err == nil { + return false + } + + if err != nil { + sc.err = err + sc.cur = Range{ + Filename: sc.filename, + Start: sc.pos, + End: sc.pos, + } + sc.tok = nil + return false + } + + sc.tok = token + start := sc.pos + end := sc.pos + new := sc.pos + + // adv is similar to token but it also includes any subsequent characters + // we're being asked to skip over by the SplitFunc. + // adv is a slice covering any additional bytes we are skipping over, based + // on what the SplitFunc told us to do with advance. 
+ adv := sc.b[sc.pos.Byte : sc.pos.Byte+advance] + + // We now need to scan over our token to count the grapheme clusters + // so we can correctly advance Column, and count the newlines so we + // can correctly advance Line. + advR := bytes.NewReader(adv) + gsc := bufio.NewScanner(advR) + advanced := 0 + gsc.Split(textseg.ScanGraphemeClusters) + for gsc.Scan() { + gr := gsc.Bytes() + new.Byte += len(gr) + new.Column++ + + // We rely here on the fact that \r\n is considered a grapheme cluster + // and so we don't need to worry about miscounting additional lines + // on files with Windows-style line endings. + if len(gr) != 0 && (gr[0] == '\r' || gr[0] == '\n') { + new.Column = 1 + new.Line++ + } + + if advanced < len(token) { + // If we've not yet found the end of our token then we'll + // also push our "end" marker along. + // (if advance > len(token) then we'll stop moving "end" early + // so that the caller only sees the range covered by token.) + end = new + } + advanced += len(gr) + } + + sc.cur = Range{ + Filename: sc.filename, + Start: start, + End: end, + } + sc.pos = new + return true +} + +// Range returns a range that covers the latest token obtained after a call +// to Scan returns true. +func (sc *RangeScanner) Range() Range { + return sc.cur +} + +// Bytes returns the slice of the input buffer that is covered by the range +// that would be returned by Range. +func (sc *RangeScanner) Bytes() []byte { + return sc.tok +} + +// Err can be called after Scan returns false to determine if the latest read +// resulted in an error, and obtain that error if so. +func (sc *RangeScanner) Err() error { + return sc.err +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner_test.go b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner_test.go new file mode 100644 index 0000000..b0dc902 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/pos_scanner_test.go @@ -0,0 +1,193 @@ +package hcl + +import ( + "bufio" + "reflect" + "testing" + + "github.com/davecgh/go-spew/spew" +) + +func TestPosScanner(t *testing.T) { + tests := map[string]struct { + Input string + Want []Range + WantToks [][]byte + }{ + "empty": { + "", + []Range{}, + [][]byte{}, + }, + "single line": { + "hello", + []Range{ + { + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + [][]byte{ + []byte("hello"), + }, + }, + "single line with trailing UNIX newline": { + "hello\n", + []Range{ + { + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + [][]byte{ + []byte("hello"), + }, + }, + "single line with trailing Windows newline": { + "hello\r\n", + []Range{ + { + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + [][]byte{ + []byte("hello"), + }, + }, + "two lines with UNIX newline": { + "hello\nworld", + []Range{ + { + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + { + Start: Pos{Byte: 6, Line: 2, Column: 1}, + End: Pos{Byte: 11, Line: 2, Column: 6}, + }, + }, + [][]byte{ + []byte("hello"), + []byte("world"), + }, + }, + "two lines with Windows newline": { + "hello\r\nworld", + []Range{ + { + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + { + Start: Pos{Byte: 7, Line: 2, Column: 1}, + End: Pos{Byte: 12, Line: 2, Column: 6}, + }, + }, + [][]byte{ + []byte("hello"), + []byte("world"), + }, + }, + "blank line with UNIX newlines": { + "hello\n\nworld", + []Range{ + { + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: 
Pos{Byte: 5, Line: 1, Column: 6}, + }, + { + Start: Pos{Byte: 6, Line: 2, Column: 1}, + End: Pos{Byte: 6, Line: 2, Column: 1}, + }, + { + Start: Pos{Byte: 7, Line: 3, Column: 1}, + End: Pos{Byte: 12, Line: 3, Column: 6}, + }, + }, + [][]byte{ + []byte("hello"), + []byte(""), + []byte("world"), + }, + }, + "blank line with Windows newlines": { + "hello\r\n\r\nworld", + []Range{ + { + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + { + Start: Pos{Byte: 7, Line: 2, Column: 1}, + End: Pos{Byte: 7, Line: 2, Column: 1}, + }, + { + Start: Pos{Byte: 9, Line: 3, Column: 1}, + End: Pos{Byte: 14, Line: 3, Column: 6}, + }, + }, + [][]byte{ + []byte("hello"), + []byte(""), + []byte("world"), + }, + }, + "two lines with combiner and UNIX newline": { + "foo \U0001f469\U0001f3ff bar\nbaz", + []Range{ + { + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 16, Line: 1, Column: 10}, + }, + { + Start: Pos{Byte: 17, Line: 2, Column: 1}, + End: Pos{Byte: 20, Line: 2, Column: 4}, + }, + }, + [][]byte{ + []byte("foo \U0001f469\U0001f3ff bar"), + []byte("baz"), + }, + }, + "two lines with combiner and Windows newline": { + "foo \U0001f469\U0001f3ff bar\r\nbaz", + []Range{ + { + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 16, Line: 1, Column: 10}, + }, + { + Start: Pos{Byte: 18, Line: 2, Column: 1}, + End: Pos{Byte: 21, Line: 2, Column: 4}, + }, + }, + [][]byte{ + []byte("foo \U0001f469\U0001f3ff bar"), + []byte("baz"), + }, + }, + } + + for name, test := range tests { + t.Run(name, func(t *testing.T) { + src := []byte(test.Input) + sc := NewRangeScanner(src, "", bufio.ScanLines) + got := make([]Range, 0) + gotToks := make([][]byte, 0) + for sc.Scan() { + got = append(got, sc.Range()) + gotToks = append(gotToks, sc.Bytes()) + } + if sc.Err() != nil { + t.Fatalf("unexpected error: %s", sc.Err()) + } + if !reflect.DeepEqual(got, test.Want) { + t.Errorf("incorrect ranges\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(test.Want)) + } + if !reflect.DeepEqual(gotToks, test.WantToks) { + t.Errorf("incorrect tokens\ngot: %swant: %s", spew.Sdump(gotToks), spew.Sdump(test.WantToks)) + } + }) + } +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/pos_test.go b/vendor/github.com/hashicorp/hcl2/hcl/pos_test.go new file mode 100644 index 0000000..cfa4137 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/pos_test.go @@ -0,0 +1,467 @@ +package hcl + +import ( + "bytes" + "fmt" + "reflect" + "testing" +) + +func TestRangeOver(t *testing.T) { + tests := []struct { + A Range + B Range + Want Range + }{ + { + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // #### + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // ##### + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // #### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 6, Line: 1, Column: 7}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // ##### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 6, Line: 1, Column: 7}, + }, + 
}, + { + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // ### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // ### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // #### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // ## + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 2, Line: 1, Column: 3}, + }, + Range{ // ## + Start: Pos{Byte: 4, Line: 1, Column: 5}, + End: Pos{Byte: 6, Line: 1, Column: 7}, + }, + Range{ // ###### + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 6, Line: 1, Column: 7}, + }, + }, + { + Range{ // ## + Start: Pos{Byte: 4, Line: 1, Column: 5}, + End: Pos{Byte: 6, Line: 1, Column: 7}, + }, + Range{ // ## + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 2, Line: 1, Column: 3}, + }, + Range{ // ###### + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 6, Line: 1, Column: 7}, + }, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s<=>%s", test.A, test.B), func(t *testing.T) { + got := RangeOver(test.A, test.B) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf( + "wrong result\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s", + visRangeOffsets(test.A), test.A, + visRangeOffsets(test.B), test.B, + visRangeOffsets(got), got, + visRangeOffsets(test.Want), test.Want, + ) + } + }) + } +} + +func TestPosOverlap(t *testing.T) { + tests := []struct { + A Range + B Range + Want Range + }{ + { + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + }, + { + Range{ // #### + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // ### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + }, + { + Range{ // #### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 6, Line: 1, Column: 7}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // ### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 
1, Column: 6}, + }, + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + }, + { + Range{ // ### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // ### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + }, + { + Range{ // ### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // ### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // #### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // ## + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 2, Line: 1, Column: 3}, + }, + Range{ // ## + Start: Pos{Byte: 4, Line: 1, Column: 5}, + End: Pos{Byte: 6, Line: 1, Column: 7}, + }, + Range{ // (no overlap) + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 0, Line: 1, Column: 1}, + }, + }, + { + Range{ // ## + Start: Pos{Byte: 4, Line: 1, Column: 5}, + End: Pos{Byte: 6, Line: 1, Column: 7}, + }, + Range{ // ## + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 2, Line: 1, Column: 3}, + }, + Range{ // (no overlap) + Start: Pos{Byte: 4, Line: 1, Column: 5}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s<=>%s", test.A, test.B), func(t *testing.T) { + got := test.A.Overlap(test.B) + if !reflect.DeepEqual(got, test.Want) { + t.Errorf( + "wrong result\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s", + visRangeOffsets(test.A), test.A, + visRangeOffsets(test.B), test.B, + visRangeOffsets(got), got, + visRangeOffsets(test.Want), test.Want, + ) + } + }) + } +} + +func TestRangePartitionAround(t *testing.T) { + tests := []struct { + Outer Range + Inner Range + WantBefore Range + WantOverlap Range + WantAfter Range + }{ + { + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // (empty) + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 2, Line: 1, Column: 3}, + }, + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // (empty) + Start: Pos{Byte: 4, Line: 1, Column: 5}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + }, + { + Range{ // #### + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // # + Start: Pos{Byte: 0, Line: 1, Column: 1}, + End: Pos{Byte: 1, Line: 1, Column: 2}, + }, + Range{ // ### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // (empty) + Start: Pos{Byte: 4, Line: 1, Column: 5}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + }, + { + Range{ 
// #### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // (empty) + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 2, Line: 1, Column: 3}, + }, + Range{ // ### + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // # + Start: Pos{Byte: 5, Line: 1, Column: 6}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + { + Range{ // #### + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // # + Start: Pos{Byte: 1, Line: 1, Column: 2}, + End: Pos{Byte: 2, Line: 1, Column: 3}, + }, + Range{ // ## + Start: Pos{Byte: 2, Line: 1, Column: 3}, + End: Pos{Byte: 4, Line: 1, Column: 5}, + }, + Range{ // # + Start: Pos{Byte: 4, Line: 1, Column: 5}, + End: Pos{Byte: 5, Line: 1, Column: 6}, + }, + }, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("%s around %s", test.Outer, test.Inner), func(t *testing.T) { + gotBefore, gotOverlap, gotAfter := test.Outer.PartitionAround(test.Inner) + if !reflect.DeepEqual(gotBefore, test.WantBefore) { + t.Errorf( + "wrong before\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s", + visRangeOffsets(test.Outer), test.Outer, + visRangeOffsets(test.Inner), test.Inner, + visRangeOffsets(gotBefore), gotBefore, + visRangeOffsets(test.WantBefore), test.WantBefore, + ) + } + if !reflect.DeepEqual(gotOverlap, test.WantOverlap) { + t.Errorf( + "wrong overlap\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s", + visRangeOffsets(test.Outer), test.Outer, + visRangeOffsets(test.Inner), test.Inner, + visRangeOffsets(gotOverlap), gotOverlap, + visRangeOffsets(test.WantOverlap), test.WantOverlap, + ) + } + if !reflect.DeepEqual(gotAfter, test.WantAfter) { + t.Errorf( + "wrong after\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s", + visRangeOffsets(test.Outer), test.Outer, + visRangeOffsets(test.Inner), test.Inner, + visRangeOffsets(gotAfter), gotAfter, + visRangeOffsets(test.WantAfter), test.WantAfter, + ) + } + }) + } +} + +// visRangeOffsets is a helper that produces a visual representation of the +// start and end byte offsets of the given range, which can then be stacked +// with the same for other ranges to more easily see how the ranges relate +// to one another. +func visRangeOffsets(rng Range) string { + var buf bytes.Buffer + if rng.End.Byte < rng.Start.Byte { + // Should never happen, but we'll visualize it anyway so we can + // more easily debug failing tests. + for i := 0; i < rng.End.Byte; i++ { + buf.WriteByte(' ') + } + for i := rng.End.Byte; i < rng.Start.Byte; i++ { + buf.WriteByte('!') + } + return buf.String() + } + + for i := 0; i < rng.Start.Byte; i++ { + buf.WriteByte(' ') + } + for i := rng.Start.Byte; i < rng.End.Byte; i++ { + buf.WriteByte('#') + } + return buf.String() +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go index 867ed42..24f4c91 100644 --- a/vendor/github.com/hashicorp/hcl2/hcl/traversal.go +++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal.go @@ -156,6 +156,17 @@ func (t Traversal) RootName() string { return t[0].(TraverseRoot).Name } +// SourceRange returns the source range for the traversal. 
+func (t Traversal) SourceRange() Range { + if len(t) == 0 { + // Nothing useful to return here, but we'll return something + // that's correctly-typed at least. + return Range{} + } + + return RangeBetween(t[0].SourceRange(), t[len(t)-1].SourceRange()) +} + // TraversalSplit represents a pair of traversals, the first of which is // an absolute traversal and the second of which is relative to the first. // @@ -206,6 +217,7 @@ func (t TraversalSplit) RootName() string { // A Traverser is a step within a Traversal. type Traverser interface { TraversalStep(cty.Value) (cty.Value, Diagnostics) + SourceRange() Range isTraverserSigil() isTraverser } @@ -231,6 +243,10 @@ func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) { panic("Cannot traverse an absolute traversal") } +func (tn TraverseRoot) SourceRange() Range { + return tn.SrcRange +} + // TraverseAttr looks up an attribute in its initial value. type TraverseAttr struct { isTraverser @@ -301,6 +317,10 @@ func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) { } } +func (tn TraverseAttr) SourceRange() Range { + return tn.SrcRange +} + // TraverseIndex applies the index operation to its initial value. type TraverseIndex struct { isTraverser @@ -312,6 +332,10 @@ func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) { return Index(val, tn.Key, &tn.SrcRange) } +func (tn TraverseIndex) SourceRange() Range { + return tn.SrcRange +} + // TraverseSplat applies the splat operation to its initial value. type TraverseSplat struct { isTraverser @@ -322,3 +346,7 @@ type TraverseSplat struct { func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) { panic("TraverseSplat not yet implemented") } + +func (tn TraverseSplat) SourceRange() Range { + return tn.SrcRange +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go new file mode 100644 index 0000000..4d2bd47 --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr.go @@ -0,0 +1,55 @@ +package hcl + +// AbsTraversalForExpr attempts to interpret the given expression as +// an absolute traversal, or returns error diagnostic(s) if that is +// not possible for the given expression. +// +// A particular Expression implementation can support this function by +// offering a method called AsTraversal that takes no arguments and +// returns either a valid absolute traversal or nil to indicate that +// no traversal is possible. +// +// In most cases the calling application is interested in the value +// that results from an expression, but in rarer cases the application +// needs to see the name of the variable and subsequent +// attributes/indexes itself, for example to allow users to give references +// to the variables themselves rather than to their values. An implementer +// of this function should at least support attribute and index steps. +func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) { + type asTraversal interface { + AsTraversal() Traversal + } + + if asT, supported := expr.(asTraversal); supported { + if traversal := asT.AsTraversal(); traversal != nil { + return traversal, nil + } + } + return nil, Diagnostics{ + &Diagnostic{ + Severity: DiagError, + Summary: "Invalid expression", + Detail: "A static variable reference is required.", + Subject: expr.Range().Ptr(), + }, + } +} + +// RelTraversalForExpr is similar to AbsTraversalForExpr but it returns +// a relative traversal instead. 
Due to the nature of ZCL expressions, the +// first element of the returned traversal is always a TraverseAttr, and +// then it will be followed by zero or more other expressions. +// +// Any expression accepted by AbsTraversalForExpr is also accepted by +// RelTraversalForExpr. +func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) { + traversal, diags := AbsTraversalForExpr(expr) + if len(traversal) > 0 { + root := traversal[0].(TraverseRoot) + traversal[0] = TraverseAttr{ + Name: root.Name, + SrcRange: root.SrcRange, + } + } + return traversal, diags +} diff --git a/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr_test.go b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr_test.go new file mode 100644 index 0000000..c6ba29a --- /dev/null +++ b/vendor/github.com/hashicorp/hcl2/hcl/traversal_for_expr_test.go @@ -0,0 +1,128 @@ +package hcl + +import ( + "testing" +) + +type asTraversalSupported struct { + staticExpr + RootName string +} + +type asTraversalNotSupported struct { + staticExpr +} + +type asTraversalDeclined struct { + staticExpr +} + +func (e asTraversalSupported) AsTraversal() Traversal { + return Traversal{ + TraverseRoot{ + Name: e.RootName, + }, + } +} + +func (e asTraversalDeclined) AsTraversal() Traversal { + return nil +} + +func TestAbsTraversalForExpr(t *testing.T) { + tests := []struct { + Expr Expression + WantRootName string + }{ + { + asTraversalSupported{RootName: "foo"}, + "foo", + }, + { + asTraversalNotSupported{}, + "", + }, + { + asTraversalDeclined{}, + "", + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + got, diags := AbsTraversalForExpr(test.Expr) + switch { + case got != nil: + if test.WantRootName == "" { + t.Fatalf("traversal was returned; want error") + } + if len(got) != 1 { + t.Fatalf("wrong traversal length %d; want 1", len(got)) + } + gotRoot, ok := got[0].(TraverseRoot) + if !ok { + t.Fatalf("first traversal step is %T; want hcl.TraverseRoot", got[0]) + } + if gotRoot.Name != test.WantRootName { + t.Errorf("wrong root name %q; want %q", gotRoot.Name, test.WantRootName) + } + default: + if !diags.HasErrors() { + t.Errorf("returned nil traversal without error diagnostics") + } + if test.WantRootName != "" { + t.Errorf("traversal was not returned; want TraverseRoot(%q)", test.WantRootName) + } + } + }) + } +} + +func TestRelTraversalForExpr(t *testing.T) { + tests := []struct { + Expr Expression + WantFirstName string + }{ + { + asTraversalSupported{RootName: "foo"}, + "foo", + }, + { + asTraversalNotSupported{}, + "", + }, + { + asTraversalDeclined{}, + "", + }, + } + + for _, test := range tests { + t.Run("", func(t *testing.T) { + got, diags := RelTraversalForExpr(test.Expr) + switch { + case got != nil: + if test.WantFirstName == "" { + t.Fatalf("traversal was returned; want error") + } + if len(got) != 1 { + t.Fatalf("wrong traversal length %d; want 1", len(got)) + } + gotRoot, ok := got[0].(TraverseAttr) + if !ok { + t.Fatalf("first traversal step is %T; want hcl.TraverseAttr", got[0]) + } + if gotRoot.Name != test.WantFirstName { + t.Errorf("wrong root name %q; want %q", gotRoot.Name, test.WantFirstName) + } + default: + if !diags.HasErrors() { + t.Errorf("returned nil traversal without error diagnostics") + } + if test.WantFirstName != "" { + t.Errorf("traversal was not returned; want TraverseAttr(%q)", test.WantFirstName) + } + } + }) + } +} diff --git a/vendor/github.com/hashicorp/terraform/CHANGELOG.md b/vendor/github.com/hashicorp/terraform/CHANGELOG.md index ab04565..dc68029 100644 
--- a/vendor/github.com/hashicorp/terraform/CHANGELOG.md +++ b/vendor/github.com/hashicorp/terraform/CHANGELOG.md @@ -1,3 +1,37 @@ +## 0.11.2 (January 9, 2018) + +BACKWARDS INCOMPATIBILITIES / NOTES: + +* backend/gcs: The gcs remote state backend was erroneously creating the state bucket if it didn't exist. This is not the intended behavior of backends, as Terraform cannot track or manage that resource. The target bucket must now be created separately, before using it with Terraform. ([#16865](https://github.com/hashicorp/terraform/issues/16865)) + +NEW FEATURES: + +* **[Habitat](https://www.habitat.sh/) Provisioner** allowing automatic installation of the Habitat agent ([#16280](https://github.com/hashicorp/terraform/issues/16280)) + +IMPROVEMENTS: + +* core: removed duplicate prompts and clarified wording when migrating backend configurations ([#16939](https://github.com/hashicorp/terraform/issues/16939)) +* config: new `rsadecrypt` interpolation function allows decrypting a base64-encoded ciphertext using a given private key. This is particularly useful for decrypting the password for a Windows instance on AWS EC2, but is generic and may find other uses too. ([#16647](https://github.com/hashicorp/terraform/issues/16647)) +* config: new `timeadd` interpolation function allows calculating a new timestamp relative to an existing known timestamp. ([#16644](https://github.com/hashicorp/terraform/issues/16644)) +* cli: Passing an empty string to `-plugin-dir` during init will remove previously saved paths ([#16969](https://github.com/hashicorp/terraform/issues/16969)) +* cli: Module and provider installation (and some other Terraform features) now implement [RFC6555](https://tools.ietf.org/html/rfc6555) when making outgoing HTTP requests, which should improve installation reliability for dual-stack (both IPv4 and IPv6) hosts running on networks that have non-performant or broken IPv6 Internet connectivity by trying both IPv4 and IPv6 connections. ([#16805](https://github.com/hashicorp/terraform/issues/16805)) +* backend/s3: it is now possible to disable the region check, for improved compatibility with third-party services that attempt to mimic the S3 API. ([#16757](https://github.com/hashicorp/terraform/issues/16757)) +* backend/s3: it is now possible to force the path-based S3 API form, for improved compatibility with third-party services that attempt to mimic the S3 API. ([#17001](https://github.com/hashicorp/terraform/issues/17001)) +* backend/s3: it is now possible to use named credentials from the `~/.aws/credentials` file, similarly to the AWS plugin ([#16661](https://github.com/hashicorp/terraform/issues/16661)) +* backend/manta: support for Triton RBAC ([#17003](https://github.com/hashicorp/terraform/issues/17003)) +* backend/gcs: support for customer-supplied encryption keys for remote state buckets ([#16936](https://github.com/hashicorp/terraform/issues/16936)) +* provider/terraform: in `terraform_remote_state`, the argument `environment` is now deprecated in favor of `workspace`. The `environment` argument will be removed in a later Terraform release. 
([#16558](https://github.com/hashicorp/terraform/issues/16558)) + +BUG FIXES: + +* config: fixed crash in `substr` interpolation function with invalid offset ([#17043](https://github.com/hashicorp/terraform/issues/17043)) +* config: Referencing a count attribute in an output no longer generates a warning ([#16866](https://github.com/hashicorp/terraform/issues/16866)) +* cli: Terraform will no longer crash when `terraform plan`, `terraform apply`, and some other commands encounter an invalid provider version constraint in configuration, generating a proper error message instead. ([#16867](https://github.com/hashicorp/terraform/issues/16867)) +* backend/gcs: The usage of the GOOGLE_CREDENTIALS environment variable now matches that of the google provider ([#16865](https://github.com/hashicorp/terraform/issues/16865)) +* backend/gcs: fixed the locking methodology to avoid "double-locking" issues when used with the `terraform_remote_state` data source ([#16852](https://github.com/hashicorp/terraform/issues/16852)) +* backend/s3: the `workspace_key_prefix` can now be an empty string or contain slashes ([#16932](https://github.com/hashicorp/terraform/issues/16932)) +* provisioner/salt-masterless: now waits for all of the remote operations to complete before returning ([#16704](https://github.com/hashicorp/terraform/issues/16704)) + ## 0.11.1 (November 30, 2017) IMPROVEMENTS: diff --git a/vendor/github.com/hashicorp/terraform/commands.go b/vendor/github.com/hashicorp/terraform/commands.go index d65437f..3335d2c 100644 --- a/vendor/github.com/hashicorp/terraform/commands.go +++ b/vendor/github.com/hashicorp/terraform/commands.go @@ -63,6 +63,8 @@ func initCommands(config *Config) { RunningInAutomation: inAutomation, PluginCacheDir: config.PluginCacheDir, OverrideDataDir: dataDir, + + ShutdownCh: makeShutdownCh(), } // The command list is included in the terraform -help @@ -80,23 +82,20 @@ func initCommands(config *Config) { Commands = map[string]cli.CommandFactory{ "apply": func() (cli.Command, error) { return &command.ApplyCommand{ - Meta: meta, - ShutdownCh: makeShutdownCh(), + Meta: meta, }, nil }, "console": func() (cli.Command, error) { return &command.ConsoleCommand{ - Meta: meta, - ShutdownCh: makeShutdownCh(), + Meta: meta, }, nil }, "destroy": func() (cli.Command, error) { return &command.ApplyCommand{ - Meta: meta, - Destroy: true, - ShutdownCh: makeShutdownCh(), + Meta: meta, + Destroy: true, }, nil }, diff --git a/vendor/github.com/hashicorp/terraform/config/config.go b/vendor/github.com/hashicorp/terraform/config/config.go index c89f2ed..055a7f3 100644 --- a/vendor/github.com/hashicorp/terraform/config/config.go +++ b/vendor/github.com/hashicorp/terraform/config/config.go @@ -8,6 +8,7 @@ import ( "strconv" "strings" + hcl2 "github.com/hashicorp/hcl2/hcl" "github.com/hashicorp/hil/ast" "github.com/hashicorp/terraform/helper/hilmapstructure" "github.com/hashicorp/terraform/plugin/discovery" @@ -415,10 +416,17 @@ func (c *Config) Validate() tfdiags.Diagnostics { if p.Version != "" { _, err := discovery.ConstraintStr(p.Version).Parse() if err != nil { - diags = diags.Append(fmt.Errorf( - "provider.%s: invalid version constraint %q: %s", - name, p.Version, err, - )) + diags = diags.Append(&hcl2.Diagnostic{ + Severity: hcl2.DiagError, + Summary: "Invalid provider version constraint", + Detail: fmt.Sprintf( + "The value %q given for provider.%s is not a valid version constraint.", + p.Version, name, + ), + // TODO: include a "Subject" source reference in here, + // once the config loader is 
able to retain source + // location information. + }) } } @@ -849,7 +857,7 @@ func (c *Config) Validate() tfdiags.Diagnostics { // a count might dynamically be set to something // other than 1 and thus splat syntax is still needed // to be safe. - if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" { + if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" && rv.Field != "count" { diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf( "output %q: must use splat syntax to access %s attribute %q, because it has \"count\" set; use %s.*.%s to obtain a list of the attributes across all instances", o.Name, diff --git a/vendor/github.com/hashicorp/terraform/config/config_test.go b/vendor/github.com/hashicorp/terraform/config/config_test.go index 18bab3f..6f6e7a4 100644 --- a/vendor/github.com/hashicorp/terraform/config/config_test.go +++ b/vendor/github.com/hashicorp/terraform/config/config_test.go @@ -217,7 +217,7 @@ func TestConfigValidate_table(t *testing.T) { "provider with invalid version constraint", "provider-version-invalid", true, - "invalid version constraint", + "not a valid version constraint", }, { "invalid provider name in module block", diff --git a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go index d9253a2..6cb9313 100644 --- a/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go +++ b/vendor/github.com/hashicorp/terraform/config/configschema/nestingmode_string.go @@ -2,7 +2,7 @@ package configschema -import "fmt" +import "strconv" const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap" @@ -10,7 +10,7 @@ var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62} func (i NestingMode) String() string { if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) { - return fmt.Sprintf("NestingMode(%d)", i) + return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")" } return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go index 94894ff..72be817 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs.go @@ -4,12 +4,15 @@ import ( "bytes" "compress/gzip" "crypto/md5" + "crypto/rsa" "crypto/sha1" "crypto/sha256" "crypto/sha512" + "crypto/x509" "encoding/base64" "encoding/hex" "encoding/json" + "encoding/pem" "fmt" "io/ioutil" "math" @@ -103,6 +106,7 @@ func Funcs() map[string]ast.Function { "pow": interpolationFuncPow(), "uuid": interpolationFuncUUID(), "replace": interpolationFuncReplace(), + "rsadecrypt": interpolationFuncRsaDecrypt(), "sha1": interpolationFuncSha1(), "sha256": interpolationFuncSha256(), "sha512": interpolationFuncSha512(), @@ -112,6 +116,7 @@ func Funcs() map[string]ast.Function { "split": interpolationFuncSplit(), "substr": interpolationFuncSubstr(), "timestamp": interpolationFuncTimestamp(), + "timeadd": interpolationFuncTimeAdd(), "title": interpolationFuncTitle(), "transpose": interpolationFuncTranspose(), "trimspace": interpolationFuncTrimSpace(), @@ -1504,6 +1509,29 @@ func interpolationFuncTimestamp() ast.Function { } } +func interpolationFuncTimeAdd() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ + ast.TypeString, // input timestamp string in RFC3339 format + 
ast.TypeString, // duration to add to input timestamp that should be parsable by time.ParseDuration + }, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + + ts, err := time.Parse(time.RFC3339, args[0].(string)) + if err != nil { + return nil, err + } + duration, err := time.ParseDuration(args[1].(string)) + if err != nil { + return nil, err + } + + return ts.Add(duration).Format(time.RFC3339), nil + }, + } +} + // interpolationFuncTitle implements the "title" function that returns a copy of the // string in which first characters of all the words are capitalized. func interpolationFuncTitle() ast.Function { @@ -1549,7 +1577,7 @@ func interpolationFuncSubstr() ast.Function { return nil, fmt.Errorf("length should be a non-negative integer") } - if offset > len(str) { + if offset > len(str) || offset < 0 { return nil, fmt.Errorf("offset cannot be larger than the length of the string") } @@ -1657,3 +1685,43 @@ func interpolationFuncAbs() ast.Function { }, } } + +// interpolationFuncRsaDecrypt implements the "rsadecrypt" function that does +// RSA decryption. +func interpolationFuncRsaDecrypt() ast.Function { + return ast.Function{ + ArgTypes: []ast.Type{ast.TypeString, ast.TypeString}, + ReturnType: ast.TypeString, + Callback: func(args []interface{}) (interface{}, error) { + s := args[0].(string) + key := args[1].(string) + + b, err := base64.StdEncoding.DecodeString(s) + if err != nil { + return "", fmt.Errorf("Failed to decode input %q: cipher text must be base64-encoded", key) + } + + block, _ := pem.Decode([]byte(key)) + if block == nil { + return "", fmt.Errorf("Failed to read key %q: no key found", key) + } + if block.Headers["Proc-Type"] == "4,ENCRYPTED" { + return "", fmt.Errorf( + "Failed to read key %q: password protected keys are\n"+ + "not supported. 
Please decrypt the key prior to use.", key) + } + + x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes) + if err != nil { + return "", err + } + + out, err := rsa.DecryptPKCS1v15(nil, x509Key, b) + if err != nil { + return "", err + } + + return string(out), nil + }, + } +} diff --git a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs_test.go b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs_test.go index fc7540e..f63d81c 100644 --- a/vendor/github.com/hashicorp/terraform/config/interpolate_funcs_test.go +++ b/vendor/github.com/hashicorp/terraform/config/interpolate_funcs_test.go @@ -2426,6 +2426,38 @@ func TestInterpolateFuncTimestamp(t *testing.T) { } } +func TestInterpolateFuncTimeAdd(t *testing.T) { + testFunction(t, testFunctionConfig{ + Cases: []testFunctionCase{ + { + `${timeadd("2017-11-22T00:00:00Z", "1s")}`, + "2017-11-22T00:00:01Z", + false, + }, + { + `${timeadd("2017-11-22T00:00:00Z", "10m1s")}`, + "2017-11-22T00:10:01Z", + false, + }, + { // also support subtraction + `${timeadd("2017-11-22T00:00:00Z", "-1h")}`, + "2017-11-21T23:00:00Z", + false, + }, + { // Invalid format timestamp + `${timeadd("2017-11-22", "-1h")}`, + nil, + true, + }, + { // Invalid format duration (day is not supported by ParseDuration) + `${timeadd("2017-11-22T00:00:00Z", "1d")}`, + nil, + true, + }, + }, + }) +} + type testFunctionConfig struct { Cases []testFunctionCase Vars map[string]ast.Variable @@ -2536,6 +2568,11 @@ func TestInterpolateFuncSubstr(t *testing.T) { nil, true, }, + { + `${substr("foo", -4, -1)}`, + nil, + true, + }, // invalid length { @@ -2780,3 +2817,146 @@ func TestInterpolateFuncAbs(t *testing.T) { }, }) } + +func TestInterpolateFuncRsaDecrypt(t *testing.T) { + testFunction(t, testFunctionConfig{ + Vars: map[string]ast.Variable{ + "var.cipher_base64": ast.Variable{ + Type: ast.TypeString, + Value: "eczGaDhXDbOFRZGhjx2etVzWbRqWDlmq0bvNt284JHVbwCgObiuyX9uV0LSAMY707IEgMkExJqXmsB4OWKxvB7epRB9G/3+F+pcrQpODlDuL9oDUAsa65zEpYF0Wbn7Oh7nrMQncyUPpyr9WUlALl0gRWytOA23S+y5joa4M34KFpawFgoqTu/2EEH4Xl1zo+0fy73fEto+nfkUY+meuyGZ1nUx/+DljP7ZqxHBFSlLODmtuTMdswUbHbXbWneW51D7Jm7xB8nSdiA2JQNK5+Sg5x8aNfgvFTt/m2w2+qpsyFa5Wjeu6fZmXSl840CA07aXbk9vN4I81WmJyblD/ZA==", + }, + "var.private_key": ast.Variable{ + Type: ast.TypeString, + Value: ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9 +c1zEekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPV +Xcxae4MR0BEegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER +1v6eHQa/nchi03MBpT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7 +r6v24u/vp/QTmBIAlNPgadVAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZ +pqcAM8wHRph8mD1EfL9hsz77pHewxolBATV+7QIDAQABAoIBAC1rK+kFW3vrAYm3 ++8/fQnQQw5nec4o6+crng6JVQXLeH32qXShNf8kLLG/Jj0vaYcTPPDZw9JCKkTMQ +0mKj9XR/5DLbBMsV6eNXXuvJJ3x4iKW5eD9WkLD4FKlNarBRyO7j8sfPTqXW7uat +NxWdFH7YsSRvNh/9pyQHLWA5OituidMrYbc3EUx8B1GPNyJ9W8Q8znNYLfwYOjU4 +Wv1SLE6qGQQH9Q0WzA2WUf8jklCYyMYTIywAjGb8kbAJlKhmj2t2Igjmqtwt1PYc +pGlqbtQBDUiWXt5S4YX/1maIQ/49yeNUajjpbJiH3DbhJbHwFTzP3pZ9P9GHOzlG +kYR+wSECgYEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS +Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvd +qcliF5vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUCgYEAqNsw +1aSl7WJa27F0DoJdlU9LWerpXcazlJcIdOz/S9QDmSK3RDQTdqfTxRmrxiYI9LEs +mkOkvzlnnOBMpnZ3ZOU5qIRfprecRIi37KDAOHWGnlC0EWGgl46YLb7/jXiWf0AG +Y+DfJJNd9i6TbIDWu8254/erAS6bKMhW/3q7f2kCgYAZ7Id/BiKJAWRpqTRBXlvw +BhXoKvjI2HjYP21z/EyZ+PFPzur/lNaZhIUlMnUfibbwE9pFggQzzf8scM7c7Sf+ 
+mLoVSdoQ/Rujz7CqvQzi2nKSsM7t0curUIb3lJWee5/UeEaxZcmIufoNUrzohAWH +BJOIPDM4ssUTLRq7wYM9uQKBgHCBau5OP8gE6mjKuXsZXWUoahpFLKwwwmJUp2vQ +pOFPJ/6WZOlqkTVT6QPAcPUbTohKrF80hsZqZyDdSfT3peFx4ZLocBrS56m6NmHR +UYHMvJ8rQm76T1fryHVidz85g3zRmfBeWg8yqT5oFg4LYgfLsPm1gRjOhs8LfPvI +OLlRAoGBAIZ5Uv4Z3s8O7WKXXUe/lq6j7vfiVkR1NW/Z/WLKXZpnmvJ7FgxN4e56 +RXT7GwNQHIY8eDjDnsHxzrxd+raOxOZeKcMHj3XyjCX3NHfTscnsBPAGYpY/Wxzh +T8UYnFu6RzkixElTf2rseEav7rkdKkI3LAeIZy7B0HulKKsmqVQ7 +-----END RSA PRIVATE KEY----- +`, + }, + "var.wrong_private_key": ast.Variable{ + Type: ast.TypeString, + Value: ` +-----BEGIN RSA PRIVATE KEY----- +MIIEowIBAAKCAQEAlrCgnEVgmNKCq7KPc+zUU5IrxPu1ClMNJS7RTsTPEkbwe5SB +p+6V6WtCbD/X/lDRRGbOENChh1Phulb7lViqgrdpHydgsrKoS5ah3DfSIxLFLE00 +9Yo4TCYwgw6+s59j16ZAFVinaQ9l6Kmrb2ll136hMrz8QKh+qw+onOLd38WFgm+W +ZtUqSXf2LANzfzzy4OWFNyFqKaCAolSkPdTS9Nz+svtScvp002DQp8OdP1AgPO+l +o5N3M38Fftapwg0pCtJ5Zq0NRWIXEonXiTEMA6zy3gEZVOmDxoIFUWnmrqlMJLFy +5S6LDrHSdqJhCxDK6WRZj43X9j8spktk3eGhMwIDAQABAoIBAAem8ID/BOi9x+Tw +LFi2rhGQWqimH4tmrEQ3HGnjlKBY+d1MrUjZ1MMFr1nP5CgF8pqGnfA8p/c3Sz8r +K5tp5T6+EZiDZ2WrrOApxg5ox0MAsQKO6SGO40z6o3wEQ6rbbTaGOrraxaWQIpyu +AQanU4Sd6ZGqByVBaS1GnklZO+shCHqw73b7g1cpLEmFzcYnKHYHlUUIsstMe8E1 +BaCY0CH7JbWBjcbiTnBVwIRZuu+EjGiQuhTilYL2OWqoMVg1WU0L2IFpR8lkf/2W +SBx5J6xhwbBGASOpM+qidiN580GdPzGhWYSqKGroHEzBm6xPSmV1tadNA26WFG4p +pthLiAECgYEA5BsPRpNYJAQLu5B0N7mj9eEp0HABVEgL/MpwiImjaKdAwp78HM64 +IuPvJxs7r+xESiIz4JyjR8zrQjYOCKJsARYkmNlEuAz0SkHabCw1BdEBwUhjUGVB +efoERK6GxfAoNqmSDwsOvHFOtsmDIlbHmg7G2rUxNVpeou415BSB0B8CgYEAqR4J +YHKk2Ibr9rU+rBU33TcdTGw0aAkFNAVeqM9j0haWuFXmV3RArgoy09lH+2Ha6z/g +fTX2xSDAWV7QUlLOlBRIhurPAo2jO2yCrGHPZcWiugstrR2hTTInigaSnCmK3i7F +6sYmL3S7K01IcVNxSlWvGijtClT92Cl2WUCTfG0CgYAiEjyk4QtQTd5mxLvnOu5X +oqs5PBGmwiAwQRiv/EcRMbJFn7Oupd3xMDSflbzDmTnWDOfMy/jDl8MoH6TW+1PA +kcsjnYhbKWwvz0hN0giVdtOZSDO1ZXpzOrn6fEsbM7T9/TQY1SD9WrtUKCNTNL0Z +sM1ZC6lu+7GZCpW4HKwLJwKBgQCRT0yxQXBg1/UxwuO5ynV4rx2Oh76z0WRWIXMH +S0MyxdP1SWGkrS/SGtM3cg/GcHtA/V6vV0nUcWK0p6IJyjrTw2XZ/zGluPuTWJYi +9dvVT26Vunshrz7kbH7KuwEICy3V4IyQQHeY+QzFlR70uMS0IVFWAepCoWqHbIDT +CYhwNQKBgGPcLXmjpGtkZvggl0aZr9LsvCTckllSCFSI861kivL/rijdNoCHGxZv +dfDkLTLcz9Gk41rD9Gxn/3sqodnTAc3Z2PxFnzg1Q/u3+x6YAgBwI/g/jE2xutGW +H7CurtMwALQ/n/6LUKFmjRZjqbKX9SO2QSaC3grd6sY9Tu+bZjLe +-----END RSA PRIVATE KEY----- +`, + }, + }, + Cases: []testFunctionCase{ + // Base-64 encoded cipher decrypts correctly + { + `${rsadecrypt(var.cipher_base64, var.private_key)}`, + "message", + false, + }, + // Raw cipher + { + `${rsadecrypt(base64decode(var.cipher_base64), var.private_key)}`, + nil, + true, + }, + // Wrong key + { + `${rsadecrypt(var.cipher_base64, var.wrong_private_key)}`, + nil, + true, + }, + // Bad key + { + `${rsadecrypt(var.cipher_base64, "bad key")}`, + nil, + true, + }, + // Empty key + { + `${rsadecrypt(var.cipher_base64, "")}`, + nil, + true, + }, + // Bad cipher + { + `${rsadecrypt("bad cipher", var.private_key)}`, + nil, + true, + }, + // Bad base64-encoded cipher + { + `${rsadecrypt(base64encode("bad cipher"), var.private_key)}`, + nil, + true, + }, + // Empty cipher + { + `${rsadecrypt("", var.private_key)}`, + nil, + true, + }, + // Too many arguments + { + `${rsadecrypt("", "", "")}`, + nil, + true, + }, + // One argument + { + `${rsadecrypt("")}`, + nil, + true, + }, + // No arguments + { + `${rsadecrypt()}`, + nil, + true, + }, + }, + }) +} diff --git a/vendor/github.com/hashicorp/terraform/config/module/get_test.go b/vendor/github.com/hashicorp/terraform/config/module/get_test.go deleted file mode 100644 index 0c6ff02..0000000 --- 
a/vendor/github.com/hashicorp/terraform/config/module/get_test.go +++ /dev/null @@ -1,335 +0,0 @@ -package module - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httptest" - "net/url" - "os" - "regexp" - "sort" - "strings" - "testing" - - version "github.com/hashicorp/go-version" - "github.com/hashicorp/terraform/registry/regsrc" - "github.com/hashicorp/terraform/registry/response" -) - -// Map of module names and location of test modules. -// Only one version for now, as we only lookup latest from the registry. -type testMod struct { - location string - version string -} - -const ( - testCredentials = "test-auth-token" -) - -// All the locationes from the mockRegistry start with a file:// scheme. If -// the the location string here doesn't have a scheme, the mockRegistry will -// find the absolute path and return a complete URL. -var testMods = map[string][]testMod{ - "registry/foo/bar": {{ - location: "file:///download/registry/foo/bar/0.2.3//*?archive=tar.gz", - version: "0.2.3", - }}, - "registry/foo/baz": {{ - location: "file:///download/registry/foo/baz/1.10.0//*?archive=tar.gz", - version: "1.10.0", - }}, - "registry/local/sub": {{ - location: "test-fixtures/registry-tar-subdir/foo.tgz//*?archive=tar.gz", - version: "0.1.2", - }}, - "exists-in-registry/identifier/provider": {{ - location: "file:///registry/exists", - version: "0.2.0", - }}, - "relative/foo/bar": {{ // There is an exception for the "relative/" prefix in the test registry server - location: "/relative-path", - version: "0.2.0", - }}, - "test-versions/name/provider": { - {version: "2.2.0"}, - {version: "2.1.1"}, - {version: "1.2.2"}, - {version: "1.2.1"}, - }, - "private/name/provider": { - {version: "1.0.0"}, - }, -} - -func latestVersion(versions []string) string { - var col version.Collection - for _, v := range versions { - ver, err := version.NewVersion(v) - if err != nil { - panic(err) - } - col = append(col, ver) - } - - sort.Sort(col) - return col[len(col)-1].String() -} - -func mockRegHandler() http.Handler { - mux := http.NewServeMux() - - download := func(w http.ResponseWriter, r *http.Request) { - p := strings.TrimLeft(r.URL.Path, "/") - // handle download request - re := regexp.MustCompile(`^([-a-z]+/\w+/\w+).*/download$`) - // download lookup - matches := re.FindStringSubmatch(p) - if len(matches) != 2 { - w.WriteHeader(http.StatusBadRequest) - return - } - - // check for auth - if strings.Contains(matches[0], "private/") { - if !strings.Contains(r.Header.Get("Authorization"), testCredentials) { - http.Error(w, "", http.StatusForbidden) - } - } - - versions, ok := testMods[matches[1]] - if !ok { - http.NotFound(w, r) - return - } - mod := versions[0] - - location := mod.location - if !strings.HasPrefix(matches[0], "relative/") && !strings.HasPrefix(location, "file:///") { - // we can't use filepath.Abs because it will clean `//` - wd, _ := os.Getwd() - location = fmt.Sprintf("file://%s/%s", wd, location) - } - - w.Header().Set("X-Terraform-Get", location) - w.WriteHeader(http.StatusNoContent) - // no body - return - } - - versions := func(w http.ResponseWriter, r *http.Request) { - p := strings.TrimLeft(r.URL.Path, "/") - re := regexp.MustCompile(`^([-a-z]+/\w+/\w+)/versions$`) - matches := re.FindStringSubmatch(p) - if len(matches) != 2 { - w.WriteHeader(http.StatusBadRequest) - return - } - - // check for auth - if strings.Contains(matches[1], "private/") { - if !strings.Contains(r.Header.Get("Authorization"), testCredentials) { - http.Error(w, "", http.StatusForbidden) - } - } - - 
name := matches[1] - versions, ok := testMods[name] - if !ok { - http.NotFound(w, r) - return - } - - // only adding the single requested module for now - // this is the minimal that any regisry is epected to support - mpvs := &response.ModuleProviderVersions{ - Source: name, - } - - for _, v := range versions { - mv := &response.ModuleVersion{ - Version: v.version, - } - mpvs.Versions = append(mpvs.Versions, mv) - } - - resp := response.ModuleVersions{ - Modules: []*response.ModuleProviderVersions{mpvs}, - } - - js, err := json.Marshal(resp) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - w.Header().Set("Content-Type", "application/json") - w.Write(js) - } - - mux.Handle("/v1/modules/", - http.StripPrefix("/v1/modules/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if strings.HasSuffix(r.URL.Path, "/download") { - download(w, r) - return - } - - if strings.HasSuffix(r.URL.Path, "/versions") { - versions(w, r) - return - } - - http.NotFound(w, r) - })), - ) - - mux.HandleFunc("/.well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - io.WriteString(w, `{"modules.v1":"http://localhost/v1/modules/"}`) - }) - return mux -} - -// Just enough like a registry to exercise our code. -// Returns the location of the latest version -func mockRegistry() *httptest.Server { - server := httptest.NewServer(mockRegHandler()) - return server -} - -// GitHub archives always contain the module source in a single subdirectory, -// so the registry will return a path with with a `//*` suffix. We need to make -// sure this doesn't intefere with our internal handling of `//` subdir. -func TestRegistryGitHubArchive(t *testing.T) { - server := mockRegistry() - defer server.Close() - - disco := testDisco(server) - storage := testStorage(t, disco) - - tree := NewTree("", testConfig(t, "registry-tar-subdir")) - - storage.Mode = GetModeGet - if err := tree.Load(storage); err != nil { - t.Fatalf("err: %s", err) - } - - if !tree.Loaded() { - t.Fatal("should be loaded") - } - - storage.Mode = GetModeNone - if err := tree.Load(storage); err != nil { - t.Fatalf("err: %s", err) - } - - // stop the registry server, and make sure that we don't need to call out again - server.Close() - tree = NewTree("", testConfig(t, "registry-tar-subdir")) - - storage.Mode = GetModeGet - if err := tree.Load(storage); err != nil { - t.Fatalf("err: %s", err) - } - - if !tree.Loaded() { - t.Fatal("should be loaded") - } - - actual := strings.TrimSpace(tree.String()) - expected := strings.TrimSpace(treeLoadSubdirStr) - if actual != expected { - t.Fatalf("got: \n\n%s\nexpected: \n\n%s", actual, expected) - } -} - -// Test that the //subdir notation can be used with registry modules -func TestRegisryModuleSubdir(t *testing.T) { - server := mockRegistry() - defer server.Close() - - disco := testDisco(server) - storage := testStorage(t, disco) - tree := NewTree("", testConfig(t, "registry-subdir")) - - storage.Mode = GetModeGet - if err := tree.Load(storage); err != nil { - t.Fatalf("err: %s", err) - } - - if !tree.Loaded() { - t.Fatal("should be loaded") - } - - storage.Mode = GetModeNone - if err := tree.Load(storage); err != nil { - t.Fatalf("err: %s", err) - } - - actual := strings.TrimSpace(tree.String()) - expected := strings.TrimSpace(treeLoadRegistrySubdirStr) - if actual != expected { - t.Fatalf("got: \n\n%s\nexpected: \n\n%s", actual, expected) - } -} - -func TestAccRegistryDiscover(t *testing.T) { - if 
os.Getenv("TF_ACC") == "" { - t.Skip("skipping ACC test") - } - - // simply check that we get a valid github URL for this from the registry - module, err := regsrc.ParseModuleSource("hashicorp/consul/aws") - if err != nil { - t.Fatal(err) - } - - s := NewStorage("/tmp", nil, nil) - loc, err := s.lookupModuleLocation(module, "") - if err != nil { - t.Fatal(err) - } - - u, err := url.Parse(loc) - if err != nil { - t.Fatal(err) - } - - if !strings.HasSuffix(u.Host, "github.com") { - t.Fatalf("expected host 'github.com', got: %q", u.Host) - } - - if !strings.Contains(u.String(), "consul") { - t.Fatalf("url doesn't contain 'consul': %s", u.String()) - } -} - -func TestAccRegistryLoad(t *testing.T) { - if os.Getenv("TF_ACC") == "" { - t.Skip("skipping ACC test") - } - - storage := testStorage(t, nil) - tree := NewTree("", testConfig(t, "registry-load")) - - storage.Mode = GetModeGet - if err := tree.Load(storage); err != nil { - t.Fatalf("err: %s", err) - } - - if !tree.Loaded() { - t.Fatal("should be loaded") - } - - storage.Mode = GetModeNone - if err := tree.Load(storage); err != nil { - t.Fatalf("err: %s", err) - } - - // TODO expand this further by fetching some metadata from the registry - actual := strings.TrimSpace(tree.String()) - if !strings.Contains(actual, "(path: vault)") { - t.Fatal("missing vault module, got:\n", actual) - } -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/inode.go b/vendor/github.com/hashicorp/terraform/config/module/inode.go index 8603ee2..da520ab 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/inode.go +++ b/vendor/github.com/hashicorp/terraform/config/module/inode.go @@ -1,4 +1,4 @@ -// +build linux darwin openbsd netbsd solaris +// +build linux darwin openbsd netbsd solaris dragonfly package module diff --git a/vendor/github.com/hashicorp/terraform/config/module/module_test.go b/vendor/github.com/hashicorp/terraform/config/module/module_test.go index 9685f7f..62e7ed2 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/module_test.go +++ b/vendor/github.com/hashicorp/terraform/config/module/module_test.go @@ -1,16 +1,13 @@ package module import ( - "fmt" "io/ioutil" "log" - "net/http/httptest" "os" "path/filepath" "testing" "github.com/hashicorp/terraform/config" - "github.com/hashicorp/terraform/svchost" "github.com/hashicorp/terraform/svchost/disco" ) @@ -49,18 +46,3 @@ func testStorage(t *testing.T, d *disco.Disco) *Storage { t.Helper() return NewStorage(tempDir(t), d, nil) } - -// test discovery maps registry.terraform.io, localhost, localhost.localdomain, -// and example.com to the test server. 
-func testDisco(s *httptest.Server) *disco.Disco { - services := map[string]interface{}{ - "modules.v1": fmt.Sprintf("%s/v1/modules/", s.URL), - } - d := disco.NewDisco() - - d.ForceHostServices(svchost.Hostname("registry.terraform.io"), services) - d.ForceHostServices(svchost.Hostname("localhost"), services) - d.ForceHostServices(svchost.Hostname("localhost.localdomain"), services) - d.ForceHostServices(svchost.Hostname("example.com"), services) - return d -} diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage.go b/vendor/github.com/hashicorp/terraform/config/module/storage.go index 1217197..c1588d6 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/storage.go +++ b/vendor/github.com/hashicorp/terraform/config/module/storage.go @@ -9,6 +9,7 @@ import ( "path/filepath" getter "github.com/hashicorp/go-getter" + "github.com/hashicorp/terraform/registry" "github.com/hashicorp/terraform/registry/regsrc" "github.com/hashicorp/terraform/svchost/auth" "github.com/hashicorp/terraform/svchost/disco" @@ -73,20 +74,17 @@ type Storage struct { Ui cli.Ui // Mode is the GetMode that will be used for various operations. Mode GetMode + + registry *registry.Client } func NewStorage(dir string, services *disco.Disco, creds auth.CredentialsSource) *Storage { - s := &Storage{ - StorageDir: dir, - Services: services, - Creds: creds, - } + regClient := registry.NewClient(services, creds, nil) - // make sure this isn't nil - if s.Services == nil { - s.Services = disco.NewDisco() + return &Storage{ + StorageDir: dir, + registry: regClient, } - return s } // loadManifest returns the moduleManifest file from the parent directory. @@ -297,17 +295,17 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e } rec.registry = true - log.Printf("[TRACE] %q is a registry module", mod.Module()) + log.Printf("[TRACE] %q is a registry module", mod.Display()) versions, err := s.moduleVersions(mod.String()) if err != nil { - log.Printf("[ERROR] error looking up versions for %q: %s", mod.Module(), err) + log.Printf("[ERROR] error looking up versions for %q: %s", mod.Display(), err) return rec, err } match, err := newestRecord(versions, constraint) if err != nil { - log.Printf("[INFO] no matching version for %q<%s>, %s", mod.Module(), constraint, err) + log.Printf("[INFO] no matching version for %q<%s>, %s", mod.Display(), constraint, err) } log.Printf("[DEBUG] matched %q version %s for %s", mod, match.Version, constraint) @@ -318,13 +316,13 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e // we need to lookup available versions // Only on Get if it's not found, on unconditionally on Update if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) { - resp, err := s.lookupModuleVersions(mod) + resp, err := s.registry.Versions(mod) if err != nil { return rec, err } if len(resp.Modules) == 0 { - return rec, fmt.Errorf("module %q not found in registry", mod.Module()) + return rec, fmt.Errorf("module %q not found in registry", mod.Display()) } match, err := newestVersion(resp.Modules[0].Versions, constraint) @@ -333,12 +331,12 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e } if match == nil { - return rec, fmt.Errorf("no versions for %q found matching %q", mod.Module(), constraint) + return rec, fmt.Errorf("no versions for %q found matching %q", mod.Display(), constraint) } rec.Version = match.Version - rec.url, err = s.lookupModuleLocation(mod, rec.Version) + rec.url, err = 
s.registry.Location(mod, rec.Version) if err != nil { return rec, err } diff --git a/vendor/github.com/hashicorp/terraform/config/module/storage_test.go b/vendor/github.com/hashicorp/terraform/config/module/storage_test.go index 6fa1212..1081119 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/storage_test.go +++ b/vendor/github.com/hashicorp/terraform/config/module/storage_test.go @@ -2,15 +2,20 @@ package module import ( "io/ioutil" + "net/url" "os" "path/filepath" + "strings" "testing" + + "github.com/hashicorp/terraform/registry/regsrc" + "github.com/hashicorp/terraform/registry/test" ) func TestGetModule(t *testing.T) { - server := mockRegistry() + server := test.Registry() defer server.Close() - disco := testDisco(server) + disco := test.Disco(server) td, err := ioutil.TempDir("", "tf") if err != nil { @@ -19,7 +24,7 @@ func TestGetModule(t *testing.T) { defer os.RemoveAll(td) storage := NewStorage(td, disco, nil) - // this module exists in a test fixture, and is known by the mockRegistry + // this module exists in a test fixture, and is known by the test.Registry // relative to our cwd. err = storage.GetModule(filepath.Join(td, "foo"), "registry/local/sub") if err != nil { @@ -45,5 +50,140 @@ func TestGetModule(t *testing.T) { if err != nil { t.Fatal(err) } - +} + +// GitHub archives always contain the module source in a single subdirectory, +// so the registry will return a path with with a `//*` suffix. We need to make +// sure this doesn't intefere with our internal handling of `//` subdir. +func TestRegistryGitHubArchive(t *testing.T) { + server := test.Registry() + defer server.Close() + + disco := test.Disco(server) + storage := testStorage(t, disco) + + tree := NewTree("", testConfig(t, "registry-tar-subdir")) + + storage.Mode = GetModeGet + if err := tree.Load(storage); err != nil { + t.Fatalf("err: %s", err) + } + + if !tree.Loaded() { + t.Fatal("should be loaded") + } + + storage.Mode = GetModeNone + if err := tree.Load(storage); err != nil { + t.Fatalf("err: %s", err) + } + + // stop the registry server, and make sure that we don't need to call out again + server.Close() + tree = NewTree("", testConfig(t, "registry-tar-subdir")) + + storage.Mode = GetModeGet + if err := tree.Load(storage); err != nil { + t.Fatalf("err: %s", err) + } + + if !tree.Loaded() { + t.Fatal("should be loaded") + } + + actual := strings.TrimSpace(tree.String()) + expected := strings.TrimSpace(treeLoadSubdirStr) + if actual != expected { + t.Fatalf("got: \n\n%s\nexpected: \n\n%s", actual, expected) + } +} + +// Test that the //subdir notation can be used with registry modules +func TestRegisryModuleSubdir(t *testing.T) { + server := test.Registry() + defer server.Close() + + disco := test.Disco(server) + storage := testStorage(t, disco) + tree := NewTree("", testConfig(t, "registry-subdir")) + + storage.Mode = GetModeGet + if err := tree.Load(storage); err != nil { + t.Fatalf("err: %s", err) + } + + if !tree.Loaded() { + t.Fatal("should be loaded") + } + + storage.Mode = GetModeNone + if err := tree.Load(storage); err != nil { + t.Fatalf("err: %s", err) + } + + actual := strings.TrimSpace(tree.String()) + expected := strings.TrimSpace(treeLoadRegistrySubdirStr) + if actual != expected { + t.Fatalf("got: \n\n%s\nexpected: \n\n%s", actual, expected) + } +} + +func TestAccRegistryDiscover(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("skipping ACC test") + } + + // simply check that we get a valid github URL for this from the registry + module, err := 
regsrc.ParseModuleSource("hashicorp/consul/aws") + if err != nil { + t.Fatal(err) + } + + s := NewStorage("/tmp", nil, nil) + loc, err := s.registry.Location(module, "") + if err != nil { + t.Fatal(err) + } + + u, err := url.Parse(loc) + if err != nil { + t.Fatal(err) + } + + if !strings.HasSuffix(u.Host, "github.com") { + t.Fatalf("expected host 'github.com', got: %q", u.Host) + } + + if !strings.Contains(u.String(), "consul") { + t.Fatalf("url doesn't contain 'consul': %s", u.String()) + } +} + +func TestAccRegistryLoad(t *testing.T) { + if os.Getenv("TF_ACC") == "" { + t.Skip("skipping ACC test") + } + + storage := testStorage(t, nil) + tree := NewTree("", testConfig(t, "registry-load")) + + storage.Mode = GetModeGet + if err := tree.Load(storage); err != nil { + t.Fatalf("err: %s", err) + } + + if !tree.Loaded() { + t.Fatal("should be loaded") + } + + storage.Mode = GetModeNone + if err := tree.Load(storage); err != nil { + t.Fatalf("err: %s", err) + } + + // TODO expand this further by fetching some metadata from the registry + actual := strings.TrimSpace(tree.String()) + if !strings.Contains(actual, "(path: vault)") { + t.Fatal("missing vault module, got:\n", actual) + } } diff --git a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go index ea68b4f..8a55e06 100644 --- a/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go +++ b/vendor/github.com/hashicorp/terraform/config/resource_mode_string.go @@ -2,7 +2,7 @@ package config -import "fmt" +import "strconv" const _ResourceMode_name = "ManagedResourceModeDataResourceMode" @@ -10,7 +10,7 @@ var _ResourceMode_index = [...]uint8{0, 19, 35} func (i ResourceMode) String() string { if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) { - return fmt.Sprintf("ResourceMode(%d)", i) + return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")" } return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/config_unix.go b/vendor/github.com/hashicorp/terraform/config_unix.go index d28d749..85c313a 100644 --- a/vendor/github.com/hashicorp/terraform/config_unix.go +++ b/vendor/github.com/hashicorp/terraform/config_unix.go @@ -32,7 +32,6 @@ func configDir() (string, error) { func homeDir() (string, error) { // First prefer the HOME environmental variable if home := os.Getenv("HOME"); home != "" { - // FIXME: homeDir gets called from globalPluginDirs during init, before // the logging is setup. We should move meta initializtion outside of // init, but in the meantime we just need to silence this output. 
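A recurring change across the generated stringer files in this diff (resource_mode_string.go above, and getsource_string.go, graphtype_string.go, instancetype_string.go, valuetype_string.go, walkoperation_string.go and severity_string.go below) is dropping the fmt import and building the fallback string with strconv instead. The post-change form of the ResourceMode stringer, consolidated here as a self-contained sketch — the package clause is illustrative only; the real file lives in the terraform config package:

package example

import "strconv"

type ResourceMode int

const _ResourceMode_name = "ManagedResourceModeDataResourceMode"

var _ResourceMode_index = [...]uint8{0, 19, 35}

// String returns the generated name for known values and falls back to
// "ResourceMode(<n>)" via strconv, avoiding the fmt dependency.
func (i ResourceMode) String() string {
	if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
		return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
}
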
diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go index 3a97629..38cd8c7 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/getsource_string.go @@ -2,7 +2,7 @@ package schema -import "fmt" +import "strconv" const ( _getSource_name_0 = "getSourceStategetSourceConfig" @@ -13,8 +13,6 @@ const ( var ( _getSource_index_0 = [...]uint8{0, 14, 29} - _getSource_index_1 = [...]uint8{0, 13} - _getSource_index_2 = [...]uint8{0, 12} _getSource_index_3 = [...]uint8{0, 18, 32} ) @@ -31,6 +29,6 @@ func (i getSource) String() string { i -= 15 return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]] default: - return fmt.Sprintf("getSource(%d)", i) + return "getSource(" + strconv.FormatInt(int64(i), 10) + ")" } } diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go index 8290ba5..c8e99b9 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource.go @@ -282,7 +282,6 @@ func (r *Resource) ReadDataApply( d *terraform.InstanceDiff, meta interface{}, ) (*terraform.InstanceState, error) { - // Data sources are always built completely from scratch // on each read, so the source state is always nil. data, err := schemaMap(r.Schema).Data(nil, d) diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go index 970dc7b..9ab8bcc 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_data.go @@ -445,7 +445,7 @@ func (d *ResourceData) init() { } func (d *ResourceData) diffChange( - k string) (interface{}, interface{}, bool, bool) { + k string) (interface{}, interface{}, bool, bool, bool) { // Get the change between the state and the config. o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact) if !o.Exists { @@ -456,7 +456,7 @@ func (d *ResourceData) diffChange( } // Return the old, new, and whether there is a change - return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed + return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false } func (d *ResourceData) getChange( diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go index 4fc1dbb..822d0dc 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/resource_diff.go @@ -236,8 +236,8 @@ func (d *ResourceDiff) clear(key string) error { // diffChange helps to implement resourceDiffer and derives its change values // from ResourceDiff's own change data, in addition to existing diff, config, and state. 
-func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool) { - old, new := d.getChange(key) +func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) { + old, new, customized := d.getChange(key) if !old.Exists { old.Value = nil @@ -246,7 +246,7 @@ func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, b new.Value = nil } - return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed + return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized } // SetNew is used to set a new diff value for the mentioned key. The value must @@ -327,7 +327,7 @@ func (d *ResourceDiff) Get(key string) interface{} { // results from the exact levels for the new diff, then from state and diff as // per normal. func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) { - old, new := d.getChange(key) + old, new, _ := d.getChange(key) return old.Value, new.Value } @@ -387,18 +387,17 @@ func (d *ResourceDiff) Id() string { // This implementation differs from ResourceData's in the way that we first get // results from the exact levels for the new diff, then from state and diff as // per normal. -func (d *ResourceDiff) getChange(key string) (getResult, getResult) { +func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) { old := d.get(strings.Split(key, "."), "state") var new getResult for p := range d.updatedKeys { if childAddrOf(key, p) { new = d.getExact(strings.Split(key, "."), "newDiff") - goto done + return old, new, true } } new = d.get(strings.Split(key, "."), "newDiff") -done: - return old, new + return old, new, false } // get performs the appropriate multi-level reader logic for ResourceDiff, diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go index 6cc71df..6773fe5 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/schema.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema.go @@ -296,8 +296,7 @@ func (s *Schema) ZeroValue() interface{} { } } -func (s *Schema) finalizeDiff( - d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff { +func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff { if d == nil { return d } @@ -337,14 +336,21 @@ func (s *Schema) finalizeDiff( return d } - if s.Computed && !d.NewComputed { - if d.Old != "" && d.New == "" { - // This is a computed value with an old value set already, - // just let it go. - return nil + if s.Computed { + // FIXME: This is where the customized bool from getChange finally + // comes into play. It allows the previously incorrect behavior + // of an empty string being used as "unset" when the value is + // computed. This should be removed once we can properly + // represent an unset/nil value from the configuration. + if !customized { + if d.Old != "" && d.New == "" { + // This is a computed value with an old value set already, + // just let it go. + return nil + } } - if d.New == "" { + if d.New == "" && !d.NewComputed { // Computed attribute without a new value set d.NewComputed = true } @@ -744,7 +750,7 @@ func isValidFieldName(name string) bool { // This helps facilitate diff logic for both ResourceData and ResoureDiff with // minimal divergence in code. 
type resourceDiffer interface { - diffChange(string) (interface{}, interface{}, bool, bool) + diffChange(string) (interface{}, interface{}, bool, bool, bool) Get(string) interface{} GetChange(string) (interface{}, interface{}) GetOk(string) (interface{}, bool) @@ -797,7 +803,7 @@ func (m schemaMap) diffList( diff *terraform.InstanceDiff, d resourceDiffer, all bool) error { - o, n, _, computedList := d.diffChange(k) + o, n, _, computedList, customized := d.diffChange(k) if computedList { n = nil } @@ -864,10 +870,13 @@ func (m schemaMap) diffList( oldStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) } // Figure out the maximum @@ -920,7 +929,7 @@ func (m schemaMap) diffMap( // First get all the values from the state var stateMap, configMap map[string]string - o, n, _, nComputed := d.diffChange(k) + o, n, _, nComputed, customized := d.diffChange(k) if err := mapstructure.WeakDecode(o, &stateMap); err != nil { return fmt.Errorf("%s: %s", k, err) } @@ -972,6 +981,7 @@ func (m schemaMap) diffMap( Old: oldStr, New: newStr, }, + customized, ) } @@ -989,16 +999,22 @@ func (m schemaMap) diffMap( continue } - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: old, - New: v, - }) + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: old, + New: v, + }, + customized, + ) } for k, v := range stateMap { - diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: v, - NewRemoved: true, - }) + diff.Attributes[prefix+k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: v, + NewRemoved: true, + }, + customized, + ) } return nil @@ -1011,7 +1027,7 @@ func (m schemaMap) diffSet( d resourceDiffer, all bool) error { - o, n, _, computedSet := d.diffChange(k) + o, n, _, computedSet, customized := d.diffChange(k) if computedSet { n = nil } @@ -1070,20 +1086,26 @@ func (m schemaMap) diffSet( countStr = "" } - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: countStr, - NewComputed: true, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: countStr, + NewComputed: true, + }, + customized, + ) return nil } // If the counts are not the same, then record that diff changed := oldLen != newLen if changed || all { - diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: oldStr, - New: newStr, - }) + diff.Attributes[k+".#"] = countSchema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: oldStr, + New: newStr, + }, + customized, + ) } // Build the list of codes that will make up our set. 
This is the @@ -1133,7 +1155,7 @@ func (m schemaMap) diffString( all bool) error { var originalN interface{} var os, ns string - o, n, _, computed := d.diffChange(k) + o, n, _, computed, customized := d.diffChange(k) if schema.StateFunc != nil && n != nil { originalN = n n = schema.StateFunc(n) @@ -1170,13 +1192,16 @@ func (m schemaMap) diffString( return nil } - diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{ - Old: os, - New: ns, - NewExtra: originalN, - NewRemoved: removed, - NewComputed: computed, - }) + diff.Attributes[k] = schema.finalizeDiff( + &terraform.ResourceAttrDiff{ + Old: os, + New: ns, + NewExtra: originalN, + NewRemoved: removed, + NewComputed: computed, + }, + customized, + ) return nil } @@ -1503,7 +1528,6 @@ func (m schemaMap) validatePrimitive( raw interface{}, schema *Schema, c *terraform.ResourceConfig) ([]string, []error) { - // Catch if the user gave a complex type where a primitive was // expected, so we can return a friendly error message that // doesn't contain Go type system terminology. diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/schema_test.go b/vendor/github.com/hashicorp/terraform/helper/schema/schema_test.go index 3698fe0..bda54cc 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/schema_test.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/schema_test.go @@ -3110,6 +3110,38 @@ func TestSchemaMap_Diff(t *testing.T) { Err: true, }, + + // A lot of resources currently depended on using the empty string as a + // nil/unset value. + // FIXME: We want this to eventually produce a diff, since there + // technically is a new value in the config. + { + Name: "optional, computed, empty string", + Schema: map[string]*Schema{ + "attr": &Schema{ + Type: TypeString, + Optional: true, + Computed: true, + }, + }, + + State: &terraform.InstanceState{ + Attributes: map[string]string{ + "attr": "bar", + }, + }, + + // this does necessarily depend on an interpolated value, but this + // is often how it comes about in a configuration, otherwise the + // value would be unset. 
+ Config: map[string]interface{}{ + "attr": "${var.foo}", + }, + + ConfigVariables: map[string]ast.Variable{ + "var.foo": interfaceToVariableSwallowError(""), + }, + }, } for i, tc := range cases { diff --git a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go index 1610cec..3bc3ac4 100644 --- a/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go +++ b/vendor/github.com/hashicorp/terraform/helper/schema/valuetype_string.go @@ -2,7 +2,7 @@ package schema -import "fmt" +import "strconv" const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject" @@ -10,7 +10,7 @@ var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77} func (i ValueType) String() string { if i < 0 || i >= ValueType(len(_ValueType_index)-1) { - return fmt.Sprintf("ValueType(%d)", i) + return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" } return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/config/module/registry.go b/vendor/github.com/hashicorp/terraform/registry/client.go similarity index 67% rename from vendor/github.com/hashicorp/terraform/config/module/registry.go rename to vendor/github.com/hashicorp/terraform/registry/client.go index da67c5a..b4cd798 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/registry.go +++ b/vendor/github.com/hashicorp/terraform/registry/client.go @@ -1,4 +1,4 @@ -package module +package registry import ( "encoding/json" @@ -12,75 +12,75 @@ import ( "time" cleanhttp "github.com/hashicorp/go-cleanhttp" - "github.com/hashicorp/terraform/registry/regsrc" "github.com/hashicorp/terraform/registry/response" "github.com/hashicorp/terraform/svchost" + "github.com/hashicorp/terraform/svchost/auth" + "github.com/hashicorp/terraform/svchost/disco" "github.com/hashicorp/terraform/version" ) const ( - defaultRegistry = "registry.terraform.io" - registryServiceID = "registry.v1" xTerraformGet = "X-Terraform-Get" xTerraformVersion = "X-Terraform-Version" requestTimeout = 10 * time.Second serviceID = "modules.v1" ) -var ( - httpClient *http.Client - tfVersion = version.String() -) +var tfVersion = version.String() -func init() { - httpClient = cleanhttp.DefaultPooledClient() - httpClient.Timeout = requestTimeout +// Client provides methods to query Terraform Registries. +type Client struct { + // this is the client to be used for all requests. + client *http.Client + + // services is a required *disco.Disco, which may have services and + // credentials pre-loaded. + services *disco.Disco + + // Creds optionally provides credentials for communicating with service + // providers. 
+ creds auth.CredentialsSource } -type errModuleNotFound string - -func (e errModuleNotFound) Error() string { - return `module "` + string(e) + `" not found` -} - -func (s *Storage) discoverRegURL(host svchost.Hostname) *url.URL { - regURL := s.Services.DiscoverServiceURL(host, serviceID) - if regURL == nil { - return nil +func NewClient(services *disco.Disco, creds auth.CredentialsSource, client *http.Client) *Client { + if services == nil { + services = disco.NewDisco() } - if !strings.HasSuffix(regURL.Path, "/") { - regURL.Path += "/" + services.SetCredentialsSource(creds) + + if client == nil { + client = cleanhttp.DefaultPooledClient() + client.Timeout = requestTimeout } - return regURL -} + services.Transport = client.Transport.(*http.Transport) -func (s *Storage) addRequestCreds(host svchost.Hostname, req *http.Request) { - if s.Creds == nil { - return - } - - creds, err := s.Creds.ForHost(host) - if err != nil { - log.Printf("[WARNING] Failed to get credentials for %s: %s (ignoring)", host, err) - return - } - - if creds != nil { - creds.PrepareRequest(req) + return &Client{ + client: client, + services: services, + creds: creds, } } -// Lookup module versions in the registry. -func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) { +// Discover qeuries the host, and returns the url for the registry. +func (c *Client) Discover(host svchost.Hostname) *url.URL { + service := c.services.DiscoverServiceURL(host, serviceID) + if !strings.HasSuffix(service.Path, "/") { + service.Path += "/" + } + return service +} + +// Versions queries the registry for a module, and returns the available versions. +func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, error) { host, err := module.SvcHost() if err != nil { return nil, err } - service := s.discoverRegURL(host) + service := c.Discover(host) if service == nil { return nil, fmt.Errorf("host %s does not provide Terraform modules", host) } @@ -99,10 +99,10 @@ func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleV return nil, err } - s.addRequestCreds(host, req) + c.addRequestCreds(host, req) req.Header.Set(xTerraformVersion, tfVersion) - resp, err := httpClient.Do(req) + resp, err := c.client.Do(req) if err != nil { return nil, err } @@ -112,7 +112,7 @@ func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleV case http.StatusOK: // OK case http.StatusNotFound: - return nil, errModuleNotFound(module.String()) + return nil, fmt.Errorf("module %q not found", module.String()) default: return nil, fmt.Errorf("error looking up module versions: %s", resp.Status) } @@ -133,14 +133,31 @@ func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleV return &versions, nil } -// lookup the location of a specific module version in the registry -func (s *Storage) lookupModuleLocation(module *regsrc.Module, version string) (string, error) { +func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) { + if c.creds == nil { + return + } + + creds, err := c.creds.ForHost(host) + if err != nil { + log.Printf("[WARNING] Failed to get credentials for %s: %s (ignoring)", host, err) + return + } + + if creds != nil { + creds.PrepareRequest(req) + } +} + +// Location find the download location for a specific version module. +// This returns a string, because the final location may contain special go-getter syntax. 
+func (c *Client) Location(module *regsrc.Module, version string) (string, error) { host, err := module.SvcHost() if err != nil { return "", err } - service := s.discoverRegURL(host) + service := c.Discover(host) if service == nil { return "", fmt.Errorf("host %s does not provide Terraform modules", host.ForDisplay()) } @@ -163,10 +180,10 @@ func (s *Storage) lookupModuleLocation(module *regsrc.Module, version string) (s return "", err } - s.addRequestCreds(host, req) + c.addRequestCreds(host, req) req.Header.Set(xTerraformVersion, tfVersion) - resp, err := httpClient.Do(req) + resp, err := c.client.Do(req) if err != nil { return "", err } diff --git a/vendor/github.com/hashicorp/terraform/config/module/registry_test.go b/vendor/github.com/hashicorp/terraform/registry/client_test.go similarity index 73% rename from vendor/github.com/hashicorp/terraform/config/module/registry_test.go rename to vendor/github.com/hashicorp/terraform/registry/client_test.go index dab7444..a4ef640 100644 --- a/vendor/github.com/hashicorp/terraform/config/module/registry_test.go +++ b/vendor/github.com/hashicorp/terraform/registry/client_test.go @@ -1,4 +1,4 @@ -package module +package registry import ( "os" @@ -7,16 +7,15 @@ import ( version "github.com/hashicorp/go-version" "github.com/hashicorp/terraform/registry/regsrc" - "github.com/hashicorp/terraform/svchost" - "github.com/hashicorp/terraform/svchost/auth" + "github.com/hashicorp/terraform/registry/test" "github.com/hashicorp/terraform/svchost/disco" ) func TestLookupModuleVersions(t *testing.T) { - server := mockRegistry() + server := test.Registry() defer server.Close() - regDisco := testDisco(server) + client := NewClient(test.Disco(server), nil, nil) // test with and without a hostname for _, src := range []string{ @@ -28,8 +27,7 @@ func TestLookupModuleVersions(t *testing.T) { t.Fatal(err) } - s := &Storage{Services: regDisco} - resp, err := s.lookupModuleVersions(modsrc) + resp, err := client.Versions(modsrc) if err != nil { t.Fatal(err) } @@ -58,11 +56,10 @@ func TestLookupModuleVersions(t *testing.T) { } func TestRegistryAuth(t *testing.T) { - server := mockRegistry() + server := test.Registry() defer server.Close() - regDisco := testDisco(server) - storage := testStorage(t, regDisco) + client := NewClient(test.Disco(server), nil, nil) src := "private/name/provider" mod, err := regsrc.ParseModuleSource(src) @@ -71,36 +68,32 @@ func TestRegistryAuth(t *testing.T) { } // both should fail without auth - _, err = storage.lookupModuleVersions(mod) + _, err = client.Versions(mod) if err == nil { t.Fatal("expected error") } - _, err = storage.lookupModuleLocation(mod, "1.0.0") + _, err = client.Location(mod, "1.0.0") if err == nil { t.Fatal("expected error") } - storage.Creds = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{ - svchost.Hostname(defaultRegistry): {"token": testCredentials}, - }) + client = NewClient(test.Disco(server), test.Credentials, nil) - _, err = storage.lookupModuleVersions(mod) + _, err = client.Versions(mod) if err != nil { t.Fatal(err) } - _, err = storage.lookupModuleLocation(mod, "1.0.0") + _, err = client.Location(mod, "1.0.0") if err != nil { t.Fatal(err) } - } func TestLookupModuleLocationRelative(t *testing.T) { - server := mockRegistry() + server := test.Registry() defer server.Close() - regDisco := testDisco(server) - storage := testStorage(t, regDisco) + client := NewClient(test.Disco(server), nil, nil) src := "relative/foo/bar" mod, err := regsrc.ParseModuleSource(src) @@ -108,7 +101,7 @@ func 
TestLookupModuleLocationRelative(t *testing.T) { t.Fatal(err) } - got, err := storage.lookupModuleLocation(mod, "0.2.0") + got, err := client.Location(mod, "0.2.0") if err != nil { t.Fatal(err) } @@ -117,7 +110,6 @@ func TestLookupModuleLocationRelative(t *testing.T) { if got != want { t.Errorf("wrong location %s; want %s", got, want) } - } func TestAccLookupModuleVersions(t *testing.T) { @@ -129,17 +121,15 @@ func TestAccLookupModuleVersions(t *testing.T) { // test with and without a hostname for _, src := range []string{ "terraform-aws-modules/vpc/aws", - defaultRegistry + "/terraform-aws-modules/vpc/aws", + regsrc.PublicRegistryHost.String() + "/terraform-aws-modules/vpc/aws", } { modsrc, err := regsrc.ParseModuleSource(src) if err != nil { t.Fatal(err) } - s := &Storage{ - Services: regDisco, - } - resp, err := s.lookupModuleVersions(modsrc) + s := NewClient(regDisco, nil, nil) + resp, err := s.Versions(modsrc) if err != nil { t.Fatal(err) } @@ -169,11 +159,10 @@ func TestAccLookupModuleVersions(t *testing.T) { // the error should reference the config source exatly, not the discovered path. func TestLookupLookupModuleError(t *testing.T) { - server := mockRegistry() + server := test.Registry() defer server.Close() - regDisco := testDisco(server) - storage := testStorage(t, regDisco) + client := NewClient(test.Disco(server), nil, nil) // this should not be found in teh registry src := "bad/local/path" @@ -182,7 +171,7 @@ func TestLookupLookupModuleError(t *testing.T) { t.Fatal(err) } - _, err = storage.lookupModuleLocation(mod, "0.2.0") + _, err = client.Location(mod, "0.2.0") if err == nil { t.Fatal("expected error") } diff --git a/vendor/github.com/hashicorp/terraform/terraform/context.go b/vendor/github.com/hashicorp/terraform/terraform/context.go index cede4f8..53a1231 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context.go @@ -145,13 +145,8 @@ func NewContext(opts *ContextOpts) (*Context, error) { // If our state is from the future, then error. Callers can avoid // this error by explicitly setting `StateFutureAllowed`. - if !opts.StateFutureAllowed && state.FromFutureTerraform() { - return nil, fmt.Errorf( - "Terraform doesn't allow running any operations against a state\n"+ - "that was written by a future Terraform version. 
The state is\n"+ - "reporting it is written by Terraform '%s'.\n\n"+ - "Please run at least that version of Terraform to continue.", - state.TFVersion) + if err := CheckStateVersion(state); err != nil && !opts.StateFutureAllowed { + return nil, err } // Explicitly reset our state version to our current version so that diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_apply_test.go b/vendor/github.com/hashicorp/terraform/terraform/context_apply_test.go index 7bc28f3..ddaaae0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_apply_test.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_apply_test.go @@ -3883,7 +3883,6 @@ func TestContext2Apply_outputDependsOn(t *testing.T) { info *InstanceInfo, is *InstanceState, id *InstanceDiff) (*InstanceState, error) { - // Sleep to allow parallel execution time.Sleep(50 * time.Millisecond) diff --git a/vendor/github.com/hashicorp/terraform/terraform/context_plan_test.go b/vendor/github.com/hashicorp/terraform/terraform/context_plan_test.go index 00d8159..8b68087 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/context_plan_test.go +++ b/vendor/github.com/hashicorp/terraform/terraform/context_plan_test.go @@ -2401,6 +2401,32 @@ func TestContext2Plan_hook(t *testing.T) { } } +func TestContext2Plan_closeProvider(t *testing.T) { + // this fixture only has an aliased provider located in the module, to make + // sure that the provier name contains a path more complex than + // "provider.aws". + m := testModule(t, "plan-close-module-provider") + p := testProvider("aws") + p.DiffFn = testDiffFn + ctx := testContext2(t, &ContextOpts{ + Module: m, + ProviderResolver: ResourceProviderResolverFixed( + map[string]ResourceProviderFactory{ + "aws": testProviderFuncFixed(p), + }, + ), + }) + + _, err := ctx.Plan() + if err != nil { + t.Fatalf("err: %s", err) + } + + if !p.CloseCalled { + t.Fatal("provider not closed") + } +} + func TestContext2Plan_orphan(t *testing.T) { m := testModule(t, "plan-orphan") p := testProvider("aws") diff --git a/vendor/github.com/hashicorp/terraform/terraform/diff.go b/vendor/github.com/hashicorp/terraform/terraform/diff.go index d6dc550..b6651c0 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/diff.go @@ -396,6 +396,11 @@ type ResourceAttrDiff struct { Type DiffAttrType } +// Modified returns the inequality of Old and New for this attr +func (d *ResourceAttrDiff) Modified() bool { + return d.Old != d.New +} + // Empty returns true if the diff for this attr is neutral func (d *ResourceAttrDiff) Empty() bool { return d.Old == d.New && !d.NewComputed && !d.NewRemoved diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go index bbc2b36..c1def91 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff.go @@ -258,9 +258,11 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error { for _, v := range containers { if v.keepDiff() { // At least one key has changes, so list all the sibling keys - // to keep in the diff. 
+ // to keep in the diff if any values have changed for k := range v { - keep[k] = true + if v[k].Modified() { + keep[k] = true + } } } } diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_diff_test.go b/vendor/github.com/hashicorp/terraform/terraform/eval_diff_test.go index 1291e69..0da3b0e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_diff_test.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_diff_test.go @@ -3,6 +3,8 @@ package terraform import ( "reflect" "testing" + + "github.com/hashicorp/terraform/config" ) func TestEvalFilterDiff(t *testing.T) { @@ -76,3 +78,69 @@ func TestEvalFilterDiff(t *testing.T) { } } } + +func TestProcessIgnoreChangesOnResourceIgnoredWithRequiresNew(t *testing.T) { + var evalDiff *EvalDiff + var instanceDiff *InstanceDiff + + var testDiffs = func(ignoreChanges []string, newAttribute string) (*EvalDiff, *InstanceDiff) { + return &EvalDiff{ + Resource: &config.Resource{ + Lifecycle: config.ResourceLifecycle{ + IgnoreChanges: ignoreChanges, + }, + }, + }, + &InstanceDiff{ + Destroy: true, + Attributes: map[string]*ResourceAttrDiff{ + "resource.changed": { + RequiresNew: true, + Type: DiffAttrInput, + Old: "old", + New: "new", + }, + "resource.unchanged": { + Old: "unchanged", + New: newAttribute, + }, + }, + } + } + + evalDiff, instanceDiff = testDiffs([]string{"resource.changed"}, "unchanged") + err := evalDiff.processIgnoreChanges(instanceDiff) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(instanceDiff.Attributes) > 0 { + t.Fatalf("Expected all resources to be ignored, found %d", len(instanceDiff.Attributes)) + } + + evalDiff, instanceDiff = testDiffs([]string{}, "unchanged") + err = evalDiff.processIgnoreChanges(instanceDiff) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(instanceDiff.Attributes) != 2 { + t.Fatalf("Expected 2 resources to be found, found %d", len(instanceDiff.Attributes)) + } + + evalDiff, instanceDiff = testDiffs([]string{"resource.changed"}, "changed") + err = evalDiff.processIgnoreChanges(instanceDiff) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(instanceDiff.Attributes) != 1 { + t.Fatalf("Expected 1 resource to be found, found %d", len(instanceDiff.Attributes)) + } + + evalDiff, instanceDiff = testDiffs([]string{}, "changed") + err = evalDiff.processIgnoreChanges(instanceDiff) + if err != nil { + t.Fatalf("err: %s", err) + } + if len(instanceDiff.Attributes) != 2 { + t.Fatalf("Expected 2 resource to be found, found %d", len(instanceDiff.Attributes)) + } +} diff --git a/vendor/github.com/hashicorp/terraform/terraform/eval_provider_test.go b/vendor/github.com/hashicorp/terraform/terraform/eval_provider_test.go index cca02a7..8722b3e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_provider_test.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_provider_test.go @@ -90,7 +90,8 @@ func TestEvalInitProvider(t *testing.T) { } func TestEvalCloseProvider(t *testing.T) { - n := &EvalCloseProvider{Name: "foo"} + providerName := ResolveProviderName("foo", nil) + n := &EvalCloseProvider{Name: providerName} provider := &MockResourceProvider{} ctx := &MockEvalContext{CloseProviderProvider: provider} if _, err := n.Eval(ctx); err != nil { @@ -100,7 +101,7 @@ func TestEvalCloseProvider(t *testing.T) { if !ctx.CloseProviderCalled { t.Fatal("should be called") } - if ctx.CloseProviderName != "foo" { + if ctx.CloseProviderName != providerName { t.Fatalf("bad: %#v", ctx.CloseProviderName) } } diff --git 
a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go index 478aa64..e48af84 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/eval_validate.go @@ -150,6 +150,7 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig) BastionUser interface{} `mapstructure:"bastion_user"` BastionPassword interface{} `mapstructure:"bastion_password"` BastionPrivateKey interface{} `mapstructure:"bastion_private_key"` + AgentIdentity interface{} `mapstructure:"agent_identity"` // For type=winrm only (enforced in winrm communicator) HTTPS interface{} `mapstructure:"https"` diff --git a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go index e97b485..95ef4e9 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/graphtype_string.go @@ -2,7 +2,7 @@ package terraform -import "fmt" +import "strconv" const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate" @@ -10,7 +10,7 @@ var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125} func (i GraphType) String() string { if i >= GraphType(len(_GraphType_index)-1) { - return fmt.Sprintf("GraphType(%d)", i) + return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")" } return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go index f69267c..b8e7d1f 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/instancetype_string.go @@ -2,7 +2,7 @@ package terraform -import "fmt" +import "strconv" const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed" @@ -10,7 +10,7 @@ var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44} func (i InstanceType) String() string { if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) { - return fmt.Sprintf("InstanceType(%d)", i) + return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")" } return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go index 52ce1e8..456e7e3 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/interpolate.go +++ b/vendor/github.com/hashicorp/terraform/terraform/interpolate.go @@ -142,7 +142,6 @@ func (i *Interpolater) valueModuleVar( n string, v *config.ModuleVariable, result map[string]ast.Variable) error { - // Build the path to the child module we want path := make([]string, len(scope.Path), len(scope.Path)+1) copy(path, scope.Path) @@ -319,7 +318,6 @@ func (i *Interpolater) valueTerraformVar( n string, v *config.TerraformVariable, result map[string]ast.Variable) error { - // "env" is supported for backward compatibility, but it's deprecated and // so we won't advertise it as being allowed in the error message. It will // be removed in a future version of Terraform. 
@@ -701,7 +699,6 @@ func (i *Interpolater) computeResourceMultiVariable( func (i *Interpolater) interpolateComplexTypeAttribute( resourceID string, attributes map[string]string) (ast.Variable, error) { - // We can now distinguish between lists and maps in state by the count field: // - lists (and by extension, sets) use the traditional .# notation // - maps use the newer .% notation diff --git a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go index b9f44a0..4594cb6 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go +++ b/vendor/github.com/hashicorp/terraform/terraform/module_dependencies.go @@ -17,7 +17,6 @@ import ( // present in the configuration. This is guaranteed not to happen for any // configuration that has passed a call to Config.Validate(). func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module { - // First we walk the configuration tree to build the overall structure // and capture the explicit/implicit/inherited provider dependencies. deps := moduleTreeConfigDependencies(root, nil) diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource.go b/vendor/github.com/hashicorp/terraform/terraform/resource.go index a8cd8dd..2f5ebb5 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource.go @@ -346,7 +346,7 @@ func (c *ResourceConfig) get( if err != nil { return nil, false } - if i >= int64(cv.Len()) { + if int(i) < 0 || int(i) >= cv.Len() { return nil, false } current = cv.Index(int(i)).Interface() diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go index 73cde0c..4000e3d 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_provider_mock.go @@ -203,6 +203,7 @@ func (p *MockResourceProvider) Diff( p.DiffInfo = info p.DiffState = state p.DiffDesired = desired + if p.DiffFn != nil { return p.DiffFn(info, state, desired) } diff --git a/vendor/github.com/hashicorp/terraform/terraform/resource_test.go b/vendor/github.com/hashicorp/terraform/terraform/resource_test.go index 31d511e..4566492 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/resource_test.go +++ b/vendor/github.com/hashicorp/terraform/terraform/resource_test.go @@ -158,6 +158,14 @@ func TestResourceConfigGet(t *testing.T) { Value: nil, }, + { + Config: map[string]interface{}{ + "foo": []interface{}{1, 2, 5}, + }, + Key: "foo.-1", + Value: nil, + }, + // get from map { Config: map[string]interface{}{ diff --git a/vendor/github.com/hashicorp/terraform/terraform/state.go b/vendor/github.com/hashicorp/terraform/terraform/state.go index 5bc2f8a..6a36348 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/state.go +++ b/vendor/github.com/hashicorp/terraform/terraform/state.go @@ -2174,6 +2174,19 @@ func (s moduleStateSort) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +// StateCompatible returns an error if the state is not compatible with the +// current version of terraform. 
+func CheckStateVersion(state *State) error { + if state == nil { + return nil + } + + if state.FromFutureTerraform() { + return fmt.Errorf(stateInvalidTerraformVersionErr, state.TFVersion) + } + return nil +} + const stateValidateErrMultiModule = ` Multiple modules with the same path: %s @@ -2182,3 +2195,11 @@ in your state file that point to the same module. This will cause Terraform to behave in unexpected and error prone ways and is invalid. Please back up and modify your state file manually to resolve this. ` + +const stateInvalidTerraformVersionErr = ` +Terraform doesn't allow running any operations against a state +that was written by a future Terraform version. The state is +reporting it is written by Terraform '%s' + +Please run at least that version of Terraform to continue. +` diff --git a/vendor/github.com/hashicorp/terraform/terraform/terraform_test.go b/vendor/github.com/hashicorp/terraform/terraform/terraform_test.go index 2deb441..640741e 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/terraform_test.go +++ b/vendor/github.com/hashicorp/terraform/terraform/terraform_test.go @@ -1675,10 +1675,7 @@ aws_instance.foo: const testTFPlanDiffIgnoreChangesWithFlatmaps = ` UPDATE: aws_instance.foo lst.#: "1" => "2" - lst.0: "j" => "j" lst.1: "" => "k" - set.#: "1" => "1" - set.0.a: "1" => "1" set.0.b: "" => "2" type: "" => "aws_instance" ` diff --git a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go index f8386ef..c4772b4 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go +++ b/vendor/github.com/hashicorp/terraform/terraform/transform_provider.go @@ -138,13 +138,13 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error { p := v.(GraphNodeProvider) // get the close provider of this type if we alread created it - closer := cpm[p.ProviderName()] + closer := cpm[p.Name()] if closer == nil { // create a closer for this provider type - closer = &graphNodeCloseProvider{ProviderNameValue: p.ProviderName()} + closer = &graphNodeCloseProvider{ProviderNameValue: p.Name()} g.Add(closer) - cpm[p.ProviderName()] = closer + cpm[p.Name()] = closer } // Close node depends on the provider itself @@ -336,7 +336,7 @@ type graphNodeCloseProvider struct { } func (n *graphNodeCloseProvider) Name() string { - return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue) + return n.ProviderNameValue + " (close)" } // GraphNodeEvalable impl. 
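The context.go and state.go hunks above replace the inline future-state check in NewContext with an exported terraform.CheckStateVersion helper and a stateInvalidTerraformVersionErr message template. A minimal sketch of how a caller could reuse it the same way NewContext now does — validateState and the allowFuture parameter are hypothetical stand-ins for opts.StateFutureAllowed:

package example

import "github.com/hashicorp/terraform/terraform"

// validateState mirrors the new check in NewContext: reject a state written
// by a future Terraform version unless the caller explicitly allows it.
// CheckStateVersion returns nil for a nil state or a non-future state.
func validateState(state *terraform.State, allowFuture bool) error {
	if err := terraform.CheckStateVersion(state); err != nil && !allowFuture {
		return err
	}
	return nil
}
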
diff --git a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go index cbd78dd..4cfc528 100644 --- a/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go +++ b/vendor/github.com/hashicorp/terraform/terraform/walkoperation_string.go @@ -2,7 +2,7 @@ package terraform -import "fmt" +import "strconv" const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport" @@ -10,7 +10,7 @@ var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96} func (i walkOperation) String() string { if i >= walkOperation(len(_walkOperation_index)-1) { - return fmt.Sprintf("walkOperation(%d)", i) + return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")" } return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]] } diff --git a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go index edf9e63..0b1249b 100644 --- a/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go +++ b/vendor/github.com/hashicorp/terraform/tfdiags/severity_string.go @@ -2,18 +2,13 @@ package tfdiags -import "fmt" +import "strconv" const ( _Severity_name_0 = "Error" _Severity_name_1 = "Warning" ) -var ( - _Severity_index_0 = [...]uint8{0, 5} - _Severity_index_1 = [...]uint8{0, 7} -) - func (i Severity) String() string { switch { case i == 69: @@ -21,6 +16,6 @@ func (i Severity) String() string { case i == 87: return _Severity_name_1 default: - return fmt.Sprintf("Severity(%d)", i) + return "Severity(" + strconv.FormatInt(int64(i), 10) + ")" } } diff --git a/vendor/github.com/hashicorp/terraform/version/version.go b/vendor/github.com/hashicorp/terraform/version/version.go index 551fe5e..699ccf7 100644 --- a/vendor/github.com/hashicorp/terraform/version/version.go +++ b/vendor/github.com/hashicorp/terraform/version/version.go @@ -11,7 +11,7 @@ import ( ) // The main version number that is being run at the moment. -const Version = "0.11.1" +const Version = "0.11.2" // A pre-release marker for the version. If this is "" (empty string) // then it means that it is a final release. Otherwise, this is a pre-release diff --git a/vendor/github.com/jmespath/go-jmespath/Makefile b/vendor/github.com/jmespath/go-jmespath/Makefile index a828d28..ad17bf0 100644 --- a/vendor/github.com/jmespath/go-jmespath/Makefile +++ b/vendor/github.com/jmespath/go-jmespath/Makefile @@ -35,7 +35,7 @@ buildfuzz: go-fuzz-build github.com/jmespath/go-jmespath/fuzz fuzz: buildfuzz - go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata + go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus bench: go test -bench . -cpuprofile cpu.out diff --git a/vendor/github.com/jmespath/go-jmespath/api.go b/vendor/github.com/jmespath/go-jmespath/api.go index 9cfa988..67df3fc 100644 --- a/vendor/github.com/jmespath/go-jmespath/api.go +++ b/vendor/github.com/jmespath/go-jmespath/api.go @@ -1,42 +1,5 @@ package jmespath -import "strconv" - -// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is -// safe for concurrent use by multiple goroutines. -type JMESPath struct { - ast ASTNode - intr *treeInterpreter -} - -// Compile parses a JMESPath expression and returns, if successful, a JMESPath -// object that can be used to match against data. 
-func Compile(expression string) (*JMESPath, error) { - parser := NewParser() - ast, err := parser.Parse(expression) - if err != nil { - return nil, err - } - jmespath := &JMESPath{ast: ast, intr: newInterpreter()} - return jmespath, nil -} - -// MustCompile is like Compile but panics if the expression cannot be parsed. -// It simplifies safe initialization of global variables holding compiled -// JMESPaths. -func MustCompile(expression string) *JMESPath { - jmespath, err := Compile(expression) - if err != nil { - panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error()) - } - return jmespath -} - -// Search evaluates a JMESPath expression against input data and returns the result. -func (jp *JMESPath) Search(data interface{}) (interface{}, error) { - return jp.intr.Execute(jp.ast, data) -} - // Search evaluates a JMESPath expression against input data and returns the result. func Search(expression string, data interface{}) (interface{}, error) { intr := newInterpreter() diff --git a/vendor/github.com/jmespath/go-jmespath/api_test.go b/vendor/github.com/jmespath/go-jmespath/api_test.go deleted file mode 100644 index b0b106d..0000000 --- a/vendor/github.com/jmespath/go-jmespath/api_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package jmespath - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestValidPrecompiledExpressionSearches(t *testing.T) { - assert := assert.New(t) - data := make(map[string]interface{}) - data["foo"] = "bar" - precompiled, err := Compile("foo") - assert.Nil(err) - result, err := precompiled.Search(data) - assert.Nil(err) - assert.Equal("bar", result) -} - -func TestInvalidPrecompileErrors(t *testing.T) { - assert := assert.New(t) - _, err := Compile("not a valid expression") - assert.NotNil(err) -} - -func TestInvalidMustCompilePanics(t *testing.T) { - defer func() { - r := recover() - assert.NotNil(t, r) - }() - MustCompile("not a valid expression") -} diff --git a/vendor/github.com/jmespath/go-jmespath/functions.go b/vendor/github.com/jmespath/go-jmespath/functions.go index 9b7cd89..8a3f2ef 100644 --- a/vendor/github.com/jmespath/go-jmespath/functions.go +++ b/vendor/github.com/jmespath/go-jmespath/functions.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "math" - "reflect" "sort" "strconv" "strings" @@ -125,197 +124,197 @@ type functionCaller struct { func newFunctionCaller() *functionCaller { caller := &functionCaller{} caller.functionTable = map[string]functionEntry{ - "length": { + "length": functionEntry{ name: "length", arguments: []argSpec{ - {types: []jpType{jpString, jpArray, jpObject}}, + argSpec{types: []jpType{jpString, jpArray, jpObject}}, }, handler: jpfLength, }, - "starts_with": { + "starts_with": functionEntry{ name: "starts_with", arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, }, handler: jpfStartsWith, }, - "abs": { + "abs": functionEntry{ name: "abs", arguments: []argSpec{ - {types: []jpType{jpNumber}}, + argSpec{types: []jpType{jpNumber}}, }, handler: jpfAbs, }, - "avg": { + "avg": functionEntry{ name: "avg", arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, + argSpec{types: []jpType{jpArrayNumber}}, }, handler: jpfAvg, }, - "ceil": { + "ceil": functionEntry{ name: "ceil", arguments: []argSpec{ - {types: []jpType{jpNumber}}, + argSpec{types: []jpType{jpNumber}}, }, handler: jpfCeil, }, - "contains": { + "contains": functionEntry{ name: "contains", arguments: []argSpec{ - {types: 
[]jpType{jpArray, jpString}}, - {types: []jpType{jpAny}}, + argSpec{types: []jpType{jpArray, jpString}}, + argSpec{types: []jpType{jpAny}}, }, handler: jpfContains, }, - "ends_with": { + "ends_with": functionEntry{ name: "ends_with", arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpString}}, }, handler: jpfEndsWith, }, - "floor": { + "floor": functionEntry{ name: "floor", arguments: []argSpec{ - {types: []jpType{jpNumber}}, + argSpec{types: []jpType{jpNumber}}, }, handler: jpfFloor, }, - "map": { + "map": functionEntry{ name: "amp", arguments: []argSpec{ - {types: []jpType{jpExpref}}, - {types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, + argSpec{types: []jpType{jpArray}}, }, handler: jpfMap, hasExpRef: true, }, - "max": { + "max": functionEntry{ name: "max", arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, + argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, }, handler: jpfMax, }, - "merge": { + "merge": functionEntry{ name: "merge", arguments: []argSpec{ - {types: []jpType{jpObject}, variadic: true}, + argSpec{types: []jpType{jpObject}, variadic: true}, }, handler: jpfMerge, }, - "max_by": { + "max_by": functionEntry{ name: "max_by", arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, }, handler: jpfMaxBy, hasExpRef: true, }, - "sum": { + "sum": functionEntry{ name: "sum", arguments: []argSpec{ - {types: []jpType{jpArrayNumber}}, + argSpec{types: []jpType{jpArrayNumber}}, }, handler: jpfSum, }, - "min": { + "min": functionEntry{ name: "min", arguments: []argSpec{ - {types: []jpType{jpArrayNumber, jpArrayString}}, + argSpec{types: []jpType{jpArrayNumber, jpArrayString}}, }, handler: jpfMin, }, - "min_by": { + "min_by": functionEntry{ name: "min_by", arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, }, handler: jpfMinBy, hasExpRef: true, }, - "type": { + "type": functionEntry{ name: "type", arguments: []argSpec{ - {types: []jpType{jpAny}}, + argSpec{types: []jpType{jpAny}}, }, handler: jpfType, }, - "keys": { + "keys": functionEntry{ name: "keys", arguments: []argSpec{ - {types: []jpType{jpObject}}, + argSpec{types: []jpType{jpObject}}, }, handler: jpfKeys, }, - "values": { + "values": functionEntry{ name: "values", arguments: []argSpec{ - {types: []jpType{jpObject}}, + argSpec{types: []jpType{jpObject}}, }, handler: jpfValues, }, - "sort": { + "sort": functionEntry{ name: "sort", arguments: []argSpec{ - {types: []jpType{jpArrayString, jpArrayNumber}}, + argSpec{types: []jpType{jpArrayString, jpArrayNumber}}, }, handler: jpfSort, }, - "sort_by": { + "sort_by": functionEntry{ name: "sort_by", arguments: []argSpec{ - {types: []jpType{jpArray}}, - {types: []jpType{jpExpref}}, + argSpec{types: []jpType{jpArray}}, + argSpec{types: []jpType{jpExpref}}, }, handler: jpfSortBy, hasExpRef: true, }, - "join": { + "join": functionEntry{ name: "join", arguments: []argSpec{ - {types: []jpType{jpString}}, - {types: []jpType{jpArrayString}}, + argSpec{types: []jpType{jpString}}, + argSpec{types: []jpType{jpArrayString}}, }, handler: jpfJoin, }, - "reverse": { + "reverse": functionEntry{ name: "reverse", arguments: []argSpec{ - {types: []jpType{jpArray, jpString}}, + argSpec{types: []jpType{jpArray, jpString}}, }, handler: jpfReverse, }, - "to_array": { 
+ "to_array": functionEntry{ name: "to_array", arguments: []argSpec{ - {types: []jpType{jpAny}}, + argSpec{types: []jpType{jpAny}}, }, handler: jpfToArray, }, - "to_string": { + "to_string": functionEntry{ name: "to_string", arguments: []argSpec{ - {types: []jpType{jpAny}}, + argSpec{types: []jpType{jpAny}}, }, handler: jpfToString, }, - "to_number": { + "to_number": functionEntry{ name: "to_number", arguments: []argSpec{ - {types: []jpType{jpAny}}, + argSpec{types: []jpType{jpAny}}, }, handler: jpfToNumber, }, - "not_null": { + "not_null": functionEntry{ name: "not_null", arguments: []argSpec{ - {types: []jpType{jpAny}, variadic: true}, + argSpec{types: []jpType{jpAny}, variadic: true}, }, handler: jpfNotNull, }, @@ -358,7 +357,7 @@ func (a *argSpec) typeCheck(arg interface{}) error { return nil } case jpArray: - if isSliceType(arg) { + if _, ok := arg.([]interface{}); ok { return nil } case jpObject: @@ -410,9 +409,8 @@ func jpfLength(arguments []interface{}) (interface{}, error) { arg := arguments[0] if c, ok := arg.(string); ok { return float64(utf8.RuneCountInString(c)), nil - } else if isSliceType(arg) { - v := reflect.ValueOf(arg) - return float64(v.Len()), nil + } else if c, ok := arg.([]interface{}); ok { + return float64(len(c)), nil } else if c, ok := arg.(map[string]interface{}); ok { return float64(len(c)), nil } diff --git a/vendor/github.com/jmespath/go-jmespath/interpreter_test.go b/vendor/github.com/jmespath/go-jmespath/interpreter_test.go index 11c6d0a..5b529c4 100644 --- a/vendor/github.com/jmespath/go-jmespath/interpreter_test.go +++ b/vendor/github.com/jmespath/go-jmespath/interpreter_test.go @@ -69,7 +69,7 @@ func TestCanSupportUserDefinedStructsRef(t *testing.T) { func TestCanSupportStructWithSliceAll(t *testing.T) { assert := assert.New(t) - data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} result, err := Search("B[].Foo", data) assert.Nil(err) assert.Equal([]interface{}{"f1", "correct"}, result) @@ -77,7 +77,7 @@ func TestCanSupportStructWithSliceAll(t *testing.T) { func TestCanSupportStructWithSlicingExpression(t *testing.T) { assert := assert.New(t) - data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} result, err := Search("B[:].Foo", data) assert.Nil(err) assert.Equal([]interface{}{"f1", "correct"}, result) @@ -85,7 +85,7 @@ func TestCanSupportStructWithSlicingExpression(t *testing.T) { func TestCanSupportStructWithFilterProjection(t *testing.T) { assert := assert.New(t) - data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} result, err := Search("B[? 
`true` ].Foo", data) assert.Nil(err) assert.Equal([]interface{}{"f1", "correct"}, result) @@ -93,7 +93,7 @@ func TestCanSupportStructWithFilterProjection(t *testing.T) { func TestCanSupportStructWithSlice(t *testing.T) { assert := assert.New(t) - data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} result, err := Search("B[-1].Foo", data) assert.Nil(err) assert.Equal("correct", result) @@ -109,7 +109,7 @@ func TestCanSupportStructWithOrExpressions(t *testing.T) { func TestCanSupportStructWithSlicePointer(t *testing.T) { assert := assert.New(t) - data := sliceType{A: "foo", C: []*scalars{{"f1", "b1"}, {"correct", "b2"}}} + data := sliceType{A: "foo", C: []*scalars{&scalars{"f1", "b1"}, &scalars{"correct", "b2"}}} result, err := Search("C[-1].Foo", data) assert.Nil(err) assert.Equal("correct", result) @@ -128,7 +128,7 @@ func TestWillAutomaticallyCapitalizeFieldNames(t *testing.T) { func TestCanSupportStructWithSliceLowerCased(t *testing.T) { assert := assert.New(t) - data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}} + data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}} result, err := Search("b[-1].foo", data) assert.Nil(err) assert.Equal("correct", result) @@ -173,14 +173,6 @@ func TestCanSupportProjectionsWithStructs(t *testing.T) { assert.Equal([]interface{}{"first", "second", "third"}, result) } -func TestCanSupportSliceOfStructsWithFunctions(t *testing.T) { - assert := assert.New(t) - data := []scalars{scalars{"a1", "b1"}, scalars{"a2", "b2"}} - result, err := Search("length(@)", data) - assert.Nil(err) - assert.Equal(result.(float64), 2.0) -} - func BenchmarkInterpretSingleFieldStruct(b *testing.B) { intr := newInterpreter() parser := NewParser() diff --git a/vendor/github.com/jmespath/go-jmespath/lexer_test.go b/vendor/github.com/jmespath/go-jmespath/lexer_test.go index d13a042..7a9a9ee 100644 --- a/vendor/github.com/jmespath/go-jmespath/lexer_test.go +++ b/vendor/github.com/jmespath/go-jmespath/lexer_test.go @@ -11,63 +11,63 @@ var lexingTests = []struct { expression string expected []token }{ - {"*", []token{{tStar, "*", 0, 1}}}, - {".", []token{{tDot, ".", 0, 1}}}, - {"[?", []token{{tFilter, "[?", 0, 2}}}, - {"[]", []token{{tFlatten, "[]", 0, 2}}}, - {"(", []token{{tLparen, "(", 0, 1}}}, - {")", []token{{tRparen, ")", 0, 1}}}, - {"[", []token{{tLbracket, "[", 0, 1}}}, - {"]", []token{{tRbracket, "]", 0, 1}}}, - {"{", []token{{tLbrace, "{", 0, 1}}}, - {"}", []token{{tRbrace, "}", 0, 1}}}, - {"||", []token{{tOr, "||", 0, 2}}}, - {"|", []token{{tPipe, "|", 0, 1}}}, - {"29", []token{{tNumber, "29", 0, 2}}}, - {"2", []token{{tNumber, "2", 0, 1}}}, - {"0", []token{{tNumber, "0", 0, 1}}}, - {"-20", []token{{tNumber, "-20", 0, 3}}}, - {"foo", []token{{tUnquotedIdentifier, "foo", 0, 3}}}, - {`"bar"`, []token{{tQuotedIdentifier, "bar", 0, 3}}}, + {"*", []token{token{tStar, "*", 0, 1}}}, + {".", []token{token{tDot, ".", 0, 1}}}, + {"[?", []token{token{tFilter, "[?", 0, 2}}}, + {"[]", []token{token{tFlatten, "[]", 0, 2}}}, + {"(", []token{token{tLparen, "(", 0, 1}}}, + {")", []token{token{tRparen, ")", 0, 1}}}, + {"[", []token{token{tLbracket, "[", 0, 1}}}, + {"]", []token{token{tRbracket, "]", 0, 1}}}, + {"{", []token{token{tLbrace, "{", 0, 1}}}, + {"}", []token{token{tRbrace, "}", 0, 1}}}, + {"||", []token{token{tOr, "||", 0, 2}}}, + {"|", []token{token{tPipe, "|", 0, 1}}}, + {"29", []token{token{tNumber, "29", 0, 2}}}, 
+ {"2", []token{token{tNumber, "2", 0, 1}}}, + {"0", []token{token{tNumber, "0", 0, 1}}}, + {"-20", []token{token{tNumber, "-20", 0, 3}}}, + {"foo", []token{token{tUnquotedIdentifier, "foo", 0, 3}}}, + {`"bar"`, []token{token{tQuotedIdentifier, "bar", 0, 3}}}, // Escaping the delimiter - {`"bar\"baz"`, []token{{tQuotedIdentifier, `bar"baz`, 0, 7}}}, - {",", []token{{tComma, ",", 0, 1}}}, - {":", []token{{tColon, ":", 0, 1}}}, - {"<", []token{{tLT, "<", 0, 1}}}, - {"<=", []token{{tLTE, "<=", 0, 2}}}, - {">", []token{{tGT, ">", 0, 1}}}, - {">=", []token{{tGTE, ">=", 0, 2}}}, - {"==", []token{{tEQ, "==", 0, 2}}}, - {"!=", []token{{tNE, "!=", 0, 2}}}, - {"`[0, 1, 2]`", []token{{tJSONLiteral, "[0, 1, 2]", 1, 9}}}, - {"'foo'", []token{{tStringLiteral, "foo", 1, 3}}}, - {"'a'", []token{{tStringLiteral, "a", 1, 1}}}, - {`'foo\'bar'`, []token{{tStringLiteral, "foo'bar", 1, 7}}}, - {"@", []token{{tCurrent, "@", 0, 1}}}, - {"&", []token{{tExpref, "&", 0, 1}}}, + {`"bar\"baz"`, []token{token{tQuotedIdentifier, `bar"baz`, 0, 7}}}, + {",", []token{token{tComma, ",", 0, 1}}}, + {":", []token{token{tColon, ":", 0, 1}}}, + {"<", []token{token{tLT, "<", 0, 1}}}, + {"<=", []token{token{tLTE, "<=", 0, 2}}}, + {">", []token{token{tGT, ">", 0, 1}}}, + {">=", []token{token{tGTE, ">=", 0, 2}}}, + {"==", []token{token{tEQ, "==", 0, 2}}}, + {"!=", []token{token{tNE, "!=", 0, 2}}}, + {"`[0, 1, 2]`", []token{token{tJSONLiteral, "[0, 1, 2]", 1, 9}}}, + {"'foo'", []token{token{tStringLiteral, "foo", 1, 3}}}, + {"'a'", []token{token{tStringLiteral, "a", 1, 1}}}, + {`'foo\'bar'`, []token{token{tStringLiteral, "foo'bar", 1, 7}}}, + {"@", []token{token{tCurrent, "@", 0, 1}}}, + {"&", []token{token{tExpref, "&", 0, 1}}}, // Quoted identifier unicode escape sequences - {`"\u2713"`, []token{{tQuotedIdentifier, "✓", 0, 3}}}, - {`"\\"`, []token{{tQuotedIdentifier, `\`, 0, 1}}}, - {"`\"foo\"`", []token{{tJSONLiteral, "\"foo\"", 1, 5}}}, + {`"\u2713"`, []token{token{tQuotedIdentifier, "✓", 0, 3}}}, + {`"\\"`, []token{token{tQuotedIdentifier, `\`, 0, 1}}}, + {"`\"foo\"`", []token{token{tJSONLiteral, "\"foo\"", 1, 5}}}, // Combinations of tokens. {"foo.bar", []token{ - {tUnquotedIdentifier, "foo", 0, 3}, - {tDot, ".", 3, 1}, - {tUnquotedIdentifier, "bar", 4, 3}, + token{tUnquotedIdentifier, "foo", 0, 3}, + token{tDot, ".", 3, 1}, + token{tUnquotedIdentifier, "bar", 4, 3}, }}, {"foo[0]", []token{ - {tUnquotedIdentifier, "foo", 0, 3}, - {tLbracket, "[", 3, 1}, - {tNumber, "0", 4, 1}, - {tRbracket, "]", 5, 1}, + token{tUnquotedIdentifier, "foo", 0, 3}, + token{tLbracket, "[", 3, 1}, + token{tNumber, "0", 4, 1}, + token{tRbracket, "]", 5, 1}, }}, {"foo[?a= Go 1.x. Code that requires Go 1.x or later should + // say "+build go1.x", and code that should only be built before Go 1.x + // (perhaps it is the stub to use in that case) should say "+build !go1.x". + // NOTE: If you add to this list, also update the doc comment in doc.go. 
+ c.ReleaseTags = []string{"go1.1", "go1.2", "go1.3", "go1.4", "go1.5", "go1.6", "go1.7", "go1.8", "go1.9"} + + env := os.Getenv("CGO_ENABLED") + if env == "" { + env = defaultCGO_ENABLED + } + switch env { + case "1": + c.CgoEnabled = true + case "0": + c.CgoEnabled = false + default: + // cgo must be explicitly enabled for cross compilation builds + if runtime.GOARCH == c.GOARCH && runtime.GOOS == c.GOOS { + c.CgoEnabled = cgoEnabled[c.GOOS+"/"+c.GOARCH] + break + } + c.CgoEnabled = false + } + + return c +} + +func envOr(name, def string) string { + s := os.Getenv(name) + if s == "" { + return def + } + return s +} + +// An ImportMode controls the behavior of the Import method. +type ImportMode uint + +const ( + // If FindOnly is set, Import stops after locating the directory + // that should contain the sources for a package. It does not + // read any files in the directory. + FindOnly ImportMode = 1 << iota + + // If AllowBinary is set, Import can be satisfied by a compiled + // package object without corresponding sources. + // + // Deprecated: + // The supported way to create a compiled-only package is to + // write source code containing a //go:binary-only-package comment at + // the top of the file. Such a package will be recognized + // regardless of this flag setting (because it has source code) + // and will have BinaryOnly set to true in the returned Package. + AllowBinary + + // If ImportComment is set, parse import comments on package statements. + // Import returns an error if it finds a comment it cannot understand + // or finds conflicting comments in multiple source files. + // See golang.org/s/go14customimport for more information. + ImportComment + + // By default, Import searches vendor directories + // that apply in the given source directory before searching + // the GOROOT and GOPATH roots. + // If an Import finds and returns a package using a vendor + // directory, the resulting ImportPath is the complete path + // to the package, including the path elements leading up + // to and including "vendor". + // For example, if Import("y", "x/subdir", 0) finds + // "x/vendor/y", the returned package's ImportPath is "x/vendor/y", + // not plain "y". + // See golang.org/s/go15vendor for more information. + // + // Setting IgnoreVendor ignores vendor directories. + // + // In contrast to the package's ImportPath, + // the returned package's Imports, TestImports, and XTestImports + // are always the exact import paths from the source files: + // Import makes no attempt to resolve or check those paths. + IgnoreVendor +) + +// A Package describes the Go package found in a directory. 
+type Package struct { + Dir string // directory containing package sources + Name string // package name + ImportComment string // path in import comment on package statement + Doc string // documentation synopsis + ImportPath string // import path of package ("" if unknown) + Root string // root of Go tree where this package lives + SrcRoot string // package source root directory ("" if unknown) + PkgRoot string // package install root directory ("" if unknown) + PkgTargetRoot string // architecture dependent install root directory ("" if unknown) + BinDir string // command install directory ("" if unknown) + Goroot bool // package found in Go root + PkgObj string // installed .a file + AllTags []string // tags that can influence file selection in this directory + ConflictDir string // this directory shadows Dir in $GOPATH + BinaryOnly bool // cannot be rebuilt from source (has //go:binary-only-package comment) + + // Source files + GoFiles []string // .go source files (excluding CgoFiles, TestGoFiles, XTestGoFiles) + CgoFiles []string // .go source files that import "C" + IgnoredGoFiles []string // .go source files ignored for this build + InvalidGoFiles []string // .go source files with detected problems (parse error, wrong package name, and so on) + CFiles []string // .c source files + CXXFiles []string // .cc, .cpp and .cxx source files + MFiles []string // .m (Objective-C) source files + HFiles []string // .h, .hh, .hpp and .hxx source files + FFiles []string // .f, .F, .for and .f90 Fortran source files + SFiles []string // .s source files + SwigFiles []string // .swig files + SwigCXXFiles []string // .swigcxx files + SysoFiles []string // .syso system object files to add to archive + + // Cgo directives + CgoCFLAGS []string // Cgo CFLAGS directives + CgoCPPFLAGS []string // Cgo CPPFLAGS directives + CgoCXXFLAGS []string // Cgo CXXFLAGS directives + CgoFFLAGS []string // Cgo FFLAGS directives + CgoLDFLAGS []string // Cgo LDFLAGS directives + CgoPkgConfig []string // Cgo pkg-config directives + + // Dependency information + Imports []string // import paths from GoFiles, CgoFiles + ImportPos map[string][]token.Position // line information for Imports + + // Test information + TestGoFiles []string // _test.go files in package + TestImports []string // import paths from TestGoFiles + TestImportPos map[string][]token.Position // line information for TestImports + XTestGoFiles []string // _test.go files outside package + XTestImports []string // import paths from XTestGoFiles + XTestImportPos map[string][]token.Position // line information for XTestImports +} + +// IsCommand reports whether the package is considered a +// command to be installed (not just a library). +// Packages named "main" are treated as commands. +func (p *Package) IsCommand() bool { + return p.Name == "main" +} + +// ImportDir is like Import but processes the Go package found in +// the named directory. +func (ctxt *Context) ImportDir(dir string, mode ImportMode) (*Package, error) { + return ctxt.Import(".", dir, mode) +} + +// NoGoError is the error used by Import to describe a directory +// containing no buildable Go source files. (It may still contain +// test files, files hidden by build tags, and so on.) +type NoGoError struct { + Dir string +} + +func (e *NoGoError) Error() string { + return "no buildable Go source files in " + e.Dir +} + +// MultiplePackageError describes a directory containing +// multiple buildable Go source files for multiple packages. 
+type MultiplePackageError struct { + Dir string // directory containing files + Packages []string // package names found + Files []string // corresponding files: Files[i] declares package Packages[i] +} + +func (e *MultiplePackageError) Error() string { + // Error string limited to two entries for compatibility. + return fmt.Sprintf("found packages %s (%s) and %s (%s) in %s", e.Packages[0], e.Files[0], e.Packages[1], e.Files[1], e.Dir) +} + +func nameExt(name string) string { + i := strings.LastIndex(name, ".") + if i < 0 { + return "" + } + return name[i:] +} + +// Import returns details about the Go package named by the import path, +// interpreting local import paths relative to the srcDir directory. +// If the path is a local import path naming a package that can be imported +// using a standard import path, the returned package will set p.ImportPath +// to that path. +// +// In the directory containing the package, .go, .c, .h, and .s files are +// considered part of the package except for: +// +// - .go files in package documentation +// - files starting with _ or . (likely editor temporary files) +// - files with build constraints not satisfied by the context +// +// If an error occurs, Import returns a non-nil error and a non-nil +// *Package containing partial information. +// +func (ctxt *Context) Import(path string, srcDir string, mode ImportMode) (*Package, error) { + p := &Package{ + ImportPath: path, + } + if path == "" { + return p, fmt.Errorf("import %q: invalid import path", path) + } + + var pkgtargetroot string + var pkga string + var pkgerr error + suffix := "" + if ctxt.InstallSuffix != "" { + suffix = "_" + ctxt.InstallSuffix + } + switch ctxt.Compiler { + case "gccgo": + pkgtargetroot = "pkg/gccgo_" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix + case "gc": + pkgtargetroot = "pkg/" + ctxt.GOOS + "_" + ctxt.GOARCH + suffix + default: + // Save error for end of function. + pkgerr = fmt.Errorf("import %q: unknown compiler %q", path, ctxt.Compiler) + } + setPkga := func() { + switch ctxt.Compiler { + case "gccgo": + dir, elem := pathpkg.Split(p.ImportPath) + pkga = pkgtargetroot + "/" + dir + "lib" + elem + ".a" + case "gc": + pkga = pkgtargetroot + "/" + p.ImportPath + ".a" + } + } + setPkga() + + binaryOnly := false + if IsLocalImport(path) { + pkga = "" // local imports have no installed path + if srcDir == "" { + return p, fmt.Errorf("import %q: import relative to unknown directory", path) + } + if !ctxt.isAbsPath(path) { + p.Dir = ctxt.joinPath(srcDir, path) + } + // p.Dir directory may or may not exist. Gather partial information first, check if it exists later. + // Determine canonical import path, if any. + // Exclude results where the import path would include /testdata/. + inTestdata := func(sub string) bool { + return strings.Contains(sub, "/testdata/") || strings.HasSuffix(sub, "/testdata") || strings.HasPrefix(sub, "testdata/") || sub == "testdata" + } + if ctxt.GOROOT != "" { + root := ctxt.joinPath(ctxt.GOROOT, "src") + if sub, ok := ctxt.hasSubdir(root, p.Dir); ok && !inTestdata(sub) { + p.Goroot = true + p.ImportPath = sub + p.Root = ctxt.GOROOT + goto Found + } + } + all := ctxt.gopath() + for i, root := range all { + rootsrc := ctxt.joinPath(root, "src") + if sub, ok := ctxt.hasSubdir(rootsrc, p.Dir); ok && !inTestdata(sub) { + // We found a potential import path for dir, + // but check that using it wouldn't find something + // else first. 
+ if ctxt.GOROOT != "" { + if dir := ctxt.joinPath(ctxt.GOROOT, "src", sub); ctxt.isDir(dir) { + p.ConflictDir = dir + goto Found + } + } + for _, earlyRoot := range all[:i] { + if dir := ctxt.joinPath(earlyRoot, "src", sub); ctxt.isDir(dir) { + p.ConflictDir = dir + goto Found + } + } + + // sub would not name some other directory instead of this one. + // Record it. + p.ImportPath = sub + p.Root = root + goto Found + } + } + // It's okay that we didn't find a root containing dir. + // Keep going with the information we have. + } else { + if strings.HasPrefix(path, "/") { + return p, fmt.Errorf("import %q: cannot import absolute path", path) + } + + // tried records the location of unsuccessful package lookups + var tried struct { + vendor []string + goroot string + gopath []string + } + gopath := ctxt.gopath() + + // Vendor directories get first chance to satisfy import. + if mode&IgnoreVendor == 0 && srcDir != "" { + searchVendor := func(root string, isGoroot bool) bool { + sub, ok := ctxt.hasSubdir(root, srcDir) + if !ok || !strings.HasPrefix(sub, "src/") || strings.Contains(sub, "/testdata/") { + return false + } + for { + vendor := ctxt.joinPath(root, sub, "vendor") + if ctxt.isDir(vendor) { + dir := ctxt.joinPath(vendor, path) + if ctxt.isDir(dir) && hasGoFiles(ctxt, dir) { + p.Dir = dir + p.ImportPath = strings.TrimPrefix(pathpkg.Join(sub, "vendor", path), "src/") + p.Goroot = isGoroot + p.Root = root + setPkga() // p.ImportPath changed + return true + } + tried.vendor = append(tried.vendor, dir) + } + i := strings.LastIndex(sub, "/") + if i < 0 { + break + } + sub = sub[:i] + } + return false + } + if searchVendor(ctxt.GOROOT, true) { + goto Found + } + for _, root := range gopath { + if searchVendor(root, false) { + goto Found + } + } + } + + // Determine directory from import path. + if ctxt.GOROOT != "" { + dir := ctxt.joinPath(ctxt.GOROOT, "src", path) + isDir := ctxt.isDir(dir) + binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(ctxt.GOROOT, pkga)) + if isDir || binaryOnly { + p.Dir = dir + p.Goroot = true + p.Root = ctxt.GOROOT + goto Found + } + tried.goroot = dir + } + for _, root := range gopath { + dir := ctxt.joinPath(root, "src", path) + isDir := ctxt.isDir(dir) + binaryOnly = !isDir && mode&AllowBinary != 0 && pkga != "" && ctxt.isFile(ctxt.joinPath(root, pkga)) + if isDir || binaryOnly { + p.Dir = dir + p.Root = root + goto Found + } + tried.gopath = append(tried.gopath, dir) + } + + // package was not found + var paths []string + format := "\t%s (vendor tree)" + for _, dir := range tried.vendor { + paths = append(paths, fmt.Sprintf(format, dir)) + format = "\t%s" + } + if tried.goroot != "" { + paths = append(paths, fmt.Sprintf("\t%s (from $GOROOT)", tried.goroot)) + } else { + paths = append(paths, "\t($GOROOT not set)") + } + format = "\t%s (from $GOPATH)" + for _, dir := range tried.gopath { + paths = append(paths, fmt.Sprintf(format, dir)) + format = "\t%s" + } + if len(tried.gopath) == 0 { + paths = append(paths, "\t($GOPATH not set. 
For more details see: 'go help gopath')") + } + return p, fmt.Errorf("cannot find package %q in any of:\n%s", path, strings.Join(paths, "\n")) + } + +Found: + if p.Root != "" { + p.SrcRoot = ctxt.joinPath(p.Root, "src") + p.PkgRoot = ctxt.joinPath(p.Root, "pkg") + p.BinDir = ctxt.joinPath(p.Root, "bin") + if pkga != "" { + p.PkgTargetRoot = ctxt.joinPath(p.Root, pkgtargetroot) + p.PkgObj = ctxt.joinPath(p.Root, pkga) + } + } + + // If it's a local import path, by the time we get here, we still haven't checked + // that p.Dir directory exists. This is the right time to do that check. + // We can't do it earlier, because we want to gather partial information for the + // non-nil *Package returned when an error occurs. + // We need to do this before we return early on FindOnly flag. + if IsLocalImport(path) && !ctxt.isDir(p.Dir) { + // package was not found + return p, fmt.Errorf("cannot find package %q in:\n\t%s", path, p.Dir) + } + + if mode&FindOnly != 0 { + return p, pkgerr + } + if binaryOnly && (mode&AllowBinary) != 0 { + return p, pkgerr + } + + dirs, err := ctxt.readDir(p.Dir) + if err != nil { + return p, err + } + + var badGoError error + var Sfiles []string // files with ".S" (capital S) + var firstFile, firstCommentFile string + imported := make(map[string][]token.Position) + testImported := make(map[string][]token.Position) + xTestImported := make(map[string][]token.Position) + allTags := make(map[string]bool) + fset := token.NewFileSet() + for _, d := range dirs { + if d.IsDir() { + continue + } + + name := d.Name() + ext := nameExt(name) + + badFile := func(err error) { + if badGoError == nil { + badGoError = err + } + p.InvalidGoFiles = append(p.InvalidGoFiles, name) + } + + match, data, filename, err := ctxt.matchFile(p.Dir, name, allTags, &p.BinaryOnly) + if err != nil { + badFile(err) + continue + } + if !match { + if ext == ".go" { + p.IgnoredGoFiles = append(p.IgnoredGoFiles, name) + } + continue + } + + // Going to save the file. For non-Go files, can stop here. + switch ext { + case ".c": + p.CFiles = append(p.CFiles, name) + continue + case ".cc", ".cpp", ".cxx": + p.CXXFiles = append(p.CXXFiles, name) + continue + case ".m": + p.MFiles = append(p.MFiles, name) + continue + case ".h", ".hh", ".hpp", ".hxx": + p.HFiles = append(p.HFiles, name) + continue + case ".f", ".F", ".for", ".f90": + p.FFiles = append(p.FFiles, name) + continue + case ".s": + p.SFiles = append(p.SFiles, name) + continue + case ".S": + Sfiles = append(Sfiles, name) + continue + case ".swig": + p.SwigFiles = append(p.SwigFiles, name) + continue + case ".swigcxx": + p.SwigCXXFiles = append(p.SwigCXXFiles, name) + continue + case ".syso": + // binary objects to add to package archive + // Likely of the form foo_windows.syso, but + // the name was vetted above with goodOSArchFile. 
+ p.SysoFiles = append(p.SysoFiles, name) + continue + } + + pf, err := parser.ParseFile(fset, filename, data, parser.ImportsOnly|parser.ParseComments) + if err != nil { + badFile(err) + continue + } + + pkg := pf.Name.Name + if pkg == "documentation" { + p.IgnoredGoFiles = append(p.IgnoredGoFiles, name) + continue + } + + isTest := strings.HasSuffix(name, "_test.go") + isXTest := false + if isTest && strings.HasSuffix(pkg, "_test") { + isXTest = true + pkg = pkg[:len(pkg)-len("_test")] + } + + if p.Name == "" { + p.Name = pkg + firstFile = name + } else if pkg != p.Name { + badFile(&MultiplePackageError{ + Dir: p.Dir, + Packages: []string{p.Name, pkg}, + Files: []string{firstFile, name}, + }) + p.InvalidGoFiles = append(p.InvalidGoFiles, name) + } + if pf.Doc != nil && p.Doc == "" { + p.Doc = doc.Synopsis(pf.Doc.Text()) + } + + if mode&ImportComment != 0 { + qcom, line := findImportComment(data) + if line != 0 { + com, err := strconv.Unquote(qcom) + if err != nil { + badFile(fmt.Errorf("%s:%d: cannot parse import comment", filename, line)) + } else if p.ImportComment == "" { + p.ImportComment = com + firstCommentFile = name + } else if p.ImportComment != com { + badFile(fmt.Errorf("found import comments %q (%s) and %q (%s) in %s", p.ImportComment, firstCommentFile, com, name, p.Dir)) + } + } + } + + // Record imports and information about cgo. + isCgo := false + for _, decl := range pf.Decls { + d, ok := decl.(*ast.GenDecl) + if !ok { + continue + } + for _, dspec := range d.Specs { + spec, ok := dspec.(*ast.ImportSpec) + if !ok { + continue + } + quoted := spec.Path.Value + path, err := strconv.Unquote(quoted) + if err != nil { + log.Panicf("%s: parser returned invalid quoted string: <%s>", filename, quoted) + } + if isXTest { + xTestImported[path] = append(xTestImported[path], fset.Position(spec.Pos())) + } else if isTest { + testImported[path] = append(testImported[path], fset.Position(spec.Pos())) + } else { + imported[path] = append(imported[path], fset.Position(spec.Pos())) + } + if path == "C" { + if isTest { + badFile(fmt.Errorf("use of cgo in test %s not supported", filename)) + } else { + cg := spec.Doc + if cg == nil && len(d.Specs) == 1 { + cg = d.Doc + } + if cg != nil { + if err := ctxt.saveCgo(filename, p, cg); err != nil { + badFile(err) + } + } + isCgo = true + } + } + } + } + if isCgo { + allTags["cgo"] = true + if ctxt.CgoEnabled { + p.CgoFiles = append(p.CgoFiles, name) + } else { + p.IgnoredGoFiles = append(p.IgnoredGoFiles, name) + } + } else if isXTest { + p.XTestGoFiles = append(p.XTestGoFiles, name) + } else if isTest { + p.TestGoFiles = append(p.TestGoFiles, name) + } else { + p.GoFiles = append(p.GoFiles, name) + } + } + if badGoError != nil { + return p, badGoError + } + if len(p.GoFiles)+len(p.CgoFiles)+len(p.TestGoFiles)+len(p.XTestGoFiles) == 0 { + return p, &NoGoError{p.Dir} + } + + for tag := range allTags { + p.AllTags = append(p.AllTags, tag) + } + sort.Strings(p.AllTags) + + p.Imports, p.ImportPos = cleanImports(imported) + p.TestImports, p.TestImportPos = cleanImports(testImported) + p.XTestImports, p.XTestImportPos = cleanImports(xTestImported) + + // add the .S files only if we are using cgo + // (which means gcc will compile them). + // The standard assemblers expect .s files. + if len(p.CgoFiles) > 0 { + p.SFiles = append(p.SFiles, Sfiles...) + sort.Strings(p.SFiles) + } + + return p, pkgerr +} + +// hasGoFiles reports whether dir contains any files with names ending in .go. 
+// For a vendor check we must exclude directories that contain no .go files. +// Otherwise it is not possible to vendor just a/b/c and still import the +// non-vendored a/b. See golang.org/issue/13832. +func hasGoFiles(ctxt *Context, dir string) bool { + ents, _ := ctxt.readDir(dir) + for _, ent := range ents { + if !ent.IsDir() && strings.HasSuffix(ent.Name(), ".go") { + return true + } + } + return false +} + +func findImportComment(data []byte) (s string, line int) { + // expect keyword package + word, data := parseWord(data) + if string(word) != "package" { + return "", 0 + } + + // expect package name + _, data = parseWord(data) + + // now ready for import comment, a // or /* */ comment + // beginning and ending on the current line. + for len(data) > 0 && (data[0] == ' ' || data[0] == '\t' || data[0] == '\r') { + data = data[1:] + } + + var comment []byte + switch { + case bytes.HasPrefix(data, slashSlash): + i := bytes.Index(data, newline) + if i < 0 { + i = len(data) + } + comment = data[2:i] + case bytes.HasPrefix(data, slashStar): + data = data[2:] + i := bytes.Index(data, starSlash) + if i < 0 { + // malformed comment + return "", 0 + } + comment = data[:i] + if bytes.Contains(comment, newline) { + return "", 0 + } + } + comment = bytes.TrimSpace(comment) + + // split comment into `import`, `"pkg"` + word, arg := parseWord(comment) + if string(word) != "import" { + return "", 0 + } + + line = 1 + bytes.Count(data[:cap(data)-cap(arg)], newline) + return strings.TrimSpace(string(arg)), line +} + +var ( + slashSlash = []byte("//") + slashStar = []byte("/*") + starSlash = []byte("*/") + newline = []byte("\n") +) + +// skipSpaceOrComment returns data with any leading spaces or comments removed. +func skipSpaceOrComment(data []byte) []byte { + for len(data) > 0 { + switch data[0] { + case ' ', '\t', '\r', '\n': + data = data[1:] + continue + case '/': + if bytes.HasPrefix(data, slashSlash) { + i := bytes.Index(data, newline) + if i < 0 { + return nil + } + data = data[i+1:] + continue + } + if bytes.HasPrefix(data, slashStar) { + data = data[2:] + i := bytes.Index(data, starSlash) + if i < 0 { + return nil + } + data = data[i+2:] + continue + } + } + break + } + return data +} + +// parseWord skips any leading spaces or comments in data +// and then parses the beginning of data as an identifier or keyword, +// returning that word and what remains after the word. +func parseWord(data []byte) (word, rest []byte) { + data = skipSpaceOrComment(data) + + // Parse past leading word characters. + rest = data + for { + r, size := utf8.DecodeRune(rest) + if unicode.IsLetter(r) || '0' <= r && r <= '9' || r == '_' { + rest = rest[size:] + continue + } + break + } + + word = data[:len(data)-len(rest)] + if len(word) == 0 { + return nil, nil + } + + return word, rest +} + +// MatchFile reports whether the file with the given name in the given directory +// matches the context and would be included in a Package created by ImportDir +// of that directory. +// +// MatchFile considers the name of the file and may use ctxt.OpenFile to +// read some or all of the file's content. +func (ctxt *Context) MatchFile(dir, name string) (match bool, err error) { + match, _, _, err = ctxt.matchFile(dir, name, nil, nil) + return +} + +// matchFile determines whether the file with the given name in the given directory +// should be included in the package being constructed. +// It returns the data read from the file. 
+// If name denotes a Go program, matchFile reads until the end of the +// imports (and returns that data) even though it only considers text +// until the first non-comment. +// If allTags is non-nil, matchFile records any encountered build tag +// by setting allTags[tag] = true. +func (ctxt *Context) matchFile(dir, name string, allTags map[string]bool, binaryOnly *bool) (match bool, data []byte, filename string, err error) { + if strings.HasPrefix(name, "_") || + strings.HasPrefix(name, ".") { + return + } + + i := strings.LastIndex(name, ".") + if i < 0 { + i = len(name) + } + ext := name[i:] + + if !ctxt.goodOSArchFile(name, allTags) && !ctxt.UseAllFiles { + return + } + + switch ext { + case ".go", ".c", ".cc", ".cxx", ".cpp", ".m", ".s", ".h", ".hh", ".hpp", ".hxx", ".f", ".F", ".f90", ".S", ".swig", ".swigcxx": + // tentatively okay - read to make sure + case ".syso": + // binary, no reading + match = true + return + default: + // skip + return + } + + filename = ctxt.joinPath(dir, name) + f, err := ctxt.openFile(filename) + if err != nil { + return + } + + if strings.HasSuffix(filename, ".go") { + data, err = readImports(f, false, nil) + if strings.HasSuffix(filename, "_test.go") { + binaryOnly = nil // ignore //go:binary-only-package comments in _test.go files + } + } else { + binaryOnly = nil // ignore //go:binary-only-package comments in non-Go sources + data, err = readComments(f) + } + f.Close() + if err != nil { + err = fmt.Errorf("read %s: %v", filename, err) + return + } + + // Look for +build comments to accept or reject the file. + var sawBinaryOnly bool + if !ctxt.shouldBuild(data, allTags, &sawBinaryOnly) && !ctxt.UseAllFiles { + return + } + + if binaryOnly != nil && sawBinaryOnly { + *binaryOnly = true + } + match = true + return +} + +func cleanImports(m map[string][]token.Position) ([]string, map[string][]token.Position) { + all := make([]string, 0, len(m)) + for path := range m { + all = append(all, path) + } + sort.Strings(all) + return all, m +} + +// Import is shorthand for Default.Import. +func Import(path, srcDir string, mode ImportMode) (*Package, error) { + return Default.Import(path, srcDir, mode) +} + +// ImportDir is shorthand for Default.ImportDir. +func ImportDir(dir string, mode ImportMode) (*Package, error) { + return Default.ImportDir(dir, mode) +} + +var slashslash = []byte("//") + +// Special comment denoting a binary-only package. +// See https://golang.org/design/2775-binary-only-packages +// for more about the design of binary-only packages. +var binaryOnlyComment = []byte("//go:binary-only-package") + +// shouldBuild reports whether it is okay to use this file, +// The rule is that in the file's leading run of // comments +// and blank lines, which must be followed by a blank line +// (to avoid including a Go package clause doc comment), +// lines beginning with '// +build' are taken as build directives. +// +// The file is accepted only if each such line lists something +// matching the file. For example: +// +// // +build windows linux +// +// marks the file as applicable only on Windows and Linux. +// +// If shouldBuild finds a //go:binary-only-package comment in the file, +// it sets *binaryOnly to true. Otherwise it does not change *binaryOnly. +// +func (ctxt *Context) shouldBuild(content []byte, allTags map[string]bool, binaryOnly *bool) bool { + sawBinaryOnly := false + + // Pass 1. Identify leading run of // comments and blank lines, + // which must be followed by a blank line. 
+ end := 0 + p := content + for len(p) > 0 { + line := p + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, p = line[:i], p[i+1:] + } else { + p = p[len(p):] + } + line = bytes.TrimSpace(line) + if len(line) == 0 { // Blank line + end = len(content) - len(p) + continue + } + if !bytes.HasPrefix(line, slashslash) { // Not comment line + break + } + } + content = content[:end] + + // Pass 2. Process each line in the run. + p = content + hasReq := len(ctxt.RequiredTags) > 0 + allok := !hasReq + for len(p) > 0 { + line := p + if i := bytes.IndexByte(line, '\n'); i >= 0 { + line, p = line[:i], p[i+1:] + } else { + p = p[len(p):] + } + line = bytes.TrimSpace(line) + if bytes.HasPrefix(line, slashslash) { + if bytes.Equal(line, binaryOnlyComment) { + sawBinaryOnly = true + } + line = bytes.TrimSpace(line[len(slashslash):]) + if len(line) > 0 && line[0] == '+' { + // Looks like a comment +line. + f := strings.Fields(string(line)) + if f[0] == "+build" { + ok := false + for _, tok := range f[1:] { + tags := map[string]bool{} + if ctxt.match(tok, tags) { + if containsAll(tags, ctxt.RequiredTags) { + ok = true + } + } + merge(allTags, tags) + } + if !hasReq { + if !ok { + allok = false + } + } else { + if ok { + allok = true + } + } + } + } + } + } + + if binaryOnly != nil && sawBinaryOnly { + *binaryOnly = true + } + + return allok +} + +func merge(to, from map[string]bool) { + if to == nil { + return + } + for k, v := range from { + to[k] = v + } +} + +func containsAll(m map[string]bool, vals []string) bool { + // yes this is N^2, but N is small. + for _, v := range vals { + if !m[v] { + return false + } + } + return true +} + +func contains(list []string, s string) bool { + for _, l := range list { + if l == s { + return true + } + } + return false +} + +// saveCgo saves the information from the #cgo lines in the import "C" comment. +// These lines set CFLAGS, CPPFLAGS, CXXFLAGS and LDFLAGS and pkg-config directives +// that affect the way cgo's C code is built. +func (ctxt *Context) saveCgo(filename string, di *Package, cg *ast.CommentGroup) error { + text := cg.Text() + for _, line := range strings.Split(text, "\n") { + orig := line + + // Line is + // #cgo [GOOS/GOARCH...] LDFLAGS: stuff + // + line = strings.TrimSpace(line) + if len(line) < 5 || line[:4] != "#cgo" || (line[4] != ' ' && line[4] != '\t') { + continue + } + + // Split at colon. + line = strings.TrimSpace(line[4:]) + i := strings.Index(line, ":") + if i < 0 { + return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig) + } + line, argstr := line[:i], line[i+1:] + + // Parse GOOS/GOARCH stuff. + f := strings.Fields(line) + if len(f) < 1 { + return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig) + } + + cond, verb := f[:len(f)-1], f[len(f)-1] + if len(cond) > 0 { + ok := false + for _, c := range cond { + if ctxt.match(c, nil) { + ok = true + break + } + } + if !ok { + continue + } + } + + args, err := splitQuoted(argstr) + if err != nil { + return fmt.Errorf("%s: invalid #cgo line: %s", filename, orig) + } + var ok bool + for i, arg := range args { + if arg, ok = expandSrcDir(arg, di.Dir); !ok { + return fmt.Errorf("%s: malformed #cgo argument: %s", filename, arg) + } + args[i] = arg + } + + switch verb { + case "CFLAGS", "CPPFLAGS", "CXXFLAGS", "FFLAGS", "LDFLAGS": + // Change relative paths to absolute. + ctxt.makePathsAbsolute(args, di.Dir) + } + + switch verb { + case "CFLAGS": + di.CgoCFLAGS = append(di.CgoCFLAGS, args...) + case "CPPFLAGS": + di.CgoCPPFLAGS = append(di.CgoCPPFLAGS, args...) 
+ case "CXXFLAGS": + di.CgoCXXFLAGS = append(di.CgoCXXFLAGS, args...) + case "FFLAGS": + di.CgoFFLAGS = append(di.CgoFFLAGS, args...) + case "LDFLAGS": + di.CgoLDFLAGS = append(di.CgoLDFLAGS, args...) + case "pkg-config": + di.CgoPkgConfig = append(di.CgoPkgConfig, args...) + default: + return fmt.Errorf("%s: invalid #cgo verb: %s", filename, orig) + } + } + return nil +} + +// expandSrcDir expands any occurrence of ${SRCDIR}, making sure +// the result is safe for the shell. +func expandSrcDir(str string, srcdir string) (string, bool) { + // "\" delimited paths cause safeCgoName to fail + // so convert native paths with a different delimiter + // to "/" before starting (eg: on windows). + srcdir = filepath.ToSlash(srcdir) + + chunks := strings.Split(str, "${SRCDIR}") + if len(chunks) < 2 { + return str, safeCgoName(str) + } + ok := true + for _, chunk := range chunks { + ok = ok && (chunk == "" || safeCgoName(chunk)) + } + ok = ok && (srcdir == "" || safeCgoName(srcdir)) + res := strings.Join(chunks, srcdir) + return res, ok && res != "" +} + +// makePathsAbsolute looks for compiler options that take paths and +// makes them absolute. We do this because through the 1.8 release we +// ran the compiler in the package directory, so any relative -I or -L +// options would be relative to that directory. In 1.9 we changed to +// running the compiler in the build directory, to get consistent +// build results (issue #19964). To keep builds working, we change any +// relative -I or -L options to be absolute. +// +// Using filepath.IsAbs and filepath.Join here means the results will be +// different on different systems, but that's OK: -I and -L options are +// inherently system-dependent. +func (ctxt *Context) makePathsAbsolute(args []string, srcDir string) { + nextPath := false + for i, arg := range args { + if nextPath { + if !filepath.IsAbs(arg) { + args[i] = filepath.Join(srcDir, arg) + } + nextPath = false + } else if strings.HasPrefix(arg, "-I") || strings.HasPrefix(arg, "-L") { + if len(arg) == 2 { + nextPath = true + } else { + if !filepath.IsAbs(arg[2:]) { + args[i] = arg[:2] + filepath.Join(srcDir, arg[2:]) + } + } + } + } +} + +// NOTE: $ is not safe for the shell, but it is allowed here because of linker options like -Wl,$ORIGIN. +// We never pass these arguments to a shell (just to programs we construct argv for), so this should be okay. +// See golang.org/issue/6038. +// The @ is for OS X. See golang.org/issue/13720. +// The % is for Jenkins. See golang.org/issue/16959. +const safeString = "+-.,/0123456789=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz:$@% " + +func safeCgoName(s string) bool { + if s == "" { + return false + } + for i := 0; i < len(s); i++ { + if c := s[i]; c < utf8.RuneSelf && strings.IndexByte(safeString, c) < 0 { + return false + } + } + return true +} + +// splitQuoted splits the string s around each instance of one or more consecutive +// white space characters while taking into account quotes and escaping, and +// returns an array of substrings of s or an empty list if s contains only white space. +// Single quotes and double quotes are recognized to prevent splitting within the +// quoted region, and are removed from the resulting substrings. If a quote in s +// isn't closed err will be set and r will have the unclosed argument as the +// last element. The backslash is used for escaping. 
+// +// For example, the following string: +// +// a b:"c d" 'e''f' "g\"" +// +// Would be parsed as: +// +// []string{"a", "b:c d", "ef", `g"`} +// +func splitQuoted(s string) (r []string, err error) { + var args []string + arg := make([]rune, len(s)) + escaped := false + quoted := false + quote := '\x00' + i := 0 + for _, rune := range s { + switch { + case escaped: + escaped = false + case rune == '\\': + escaped = true + continue + case quote != '\x00': + if rune == quote { + quote = '\x00' + continue + } + case rune == '"' || rune == '\'': + quoted = true + quote = rune + continue + case unicode.IsSpace(rune): + if quoted || i > 0 { + quoted = false + args = append(args, string(arg[:i])) + i = 0 + } + continue + } + arg[i] = rune + i++ + } + if quoted || i > 0 { + args = append(args, string(arg[:i])) + } + if quote != 0 { + err = errors.New("unclosed quote") + } else if escaped { + err = errors.New("unfinished escaping") + } + return args, err +} + +// match reports whether the name is one of: +// +// $GOOS +// $GOARCH +// cgo (if cgo is enabled) +// !cgo (if cgo is disabled) +// ctxt.Compiler +// !ctxt.Compiler +// tag (if tag is listed in ctxt.BuildTags or ctxt.ReleaseTags) +// !tag (if tag is not listed in ctxt.BuildTags or ctxt.ReleaseTags) +// a comma-separated list of any of these +// +func (ctxt *Context) match(name string, allTags map[string]bool) bool { + if name == "" { + if allTags != nil { + allTags[name] = true + } + return false + } + if i := strings.Index(name, ","); i >= 0 { + // comma-separated list + ok1 := ctxt.match(name[:i], allTags) + ok2 := ctxt.match(name[i+1:], allTags) + return ok1 && ok2 + } + if strings.HasPrefix(name, "!!") { // bad syntax, reject always + return false + } + if strings.HasPrefix(name, "!") { // negation + return len(name) > 1 && !ctxt.match(name[1:], allTags) + } + + if allTags != nil { + allTags[name] = true + } + + // Tags must be letters, digits, underscores or dots. + // Unlike in Go identifiers, all digits are fine (e.g., "386"). + for _, c := range name { + if !unicode.IsLetter(c) && !unicode.IsDigit(c) && c != '_' && c != '.' { + return false + } + } + + // special tags + if ctxt.CgoEnabled && name == "cgo" { + return true + } + if name == ctxt.GOOS || name == ctxt.GOARCH || name == ctxt.Compiler { + return true + } + if ctxt.GOOS == "android" && name == "linux" { + return true + } + + // other tags + for _, tag := range ctxt.BuildTags { + if tag == name { + return true + } + } + for _, tag := range ctxt.ReleaseTags { + if tag == name { + return true + } + } + + return false +} + +// goodOSArchFile returns false if the name contains a $GOOS or $GOARCH +// suffix which does not match the current system. +// The recognized name formats are: +// +// name_$(GOOS).* +// name_$(GOARCH).* +// name_$(GOOS)_$(GOARCH).* +// name_$(GOOS)_test.* +// name_$(GOARCH)_test.* +// name_$(GOOS)_$(GOARCH)_test.* +// +// An exception: if GOOS=android, then files with GOOS=linux are also matched. +func (ctxt *Context) goodOSArchFile(name string, allTags map[string]bool) bool { + if dot := strings.Index(name, "."); dot != -1 { + name = name[:dot] + } + + // Before Go 1.4, a file called "linux.go" would be equivalent to having a + // build tag "linux" in that file. For Go 1.4 and beyond, we require this + // auto-tagging to apply only to files with a non-empty prefix, so + // "foo_linux.go" is tagged but "linux.go" is not. 
This allows new operating + // systems, such as android, to arrive without breaking existing code with + // innocuous source code in "android.go". The easiest fix: cut everything + // in the name before the initial _. + i := strings.Index(name, "_") + if i < 0 { + return true + } + name = name[i:] // ignore everything before first _ + + l := strings.Split(name, "_") + if n := len(l); n > 0 && l[n-1] == "test" { + l = l[:n-1] + } + n := len(l) + if n >= 2 && knownOS[l[n-2]] && knownArch[l[n-1]] { + if allTags != nil { + allTags[l[n-2]] = true + allTags[l[n-1]] = true + } + if l[n-1] != ctxt.GOARCH { + return false + } + if ctxt.GOOS == "android" && l[n-2] == "linux" { + return true + } + return l[n-2] == ctxt.GOOS + } + if n >= 1 && knownOS[l[n-1]] { + if allTags != nil { + allTags[l[n-1]] = true + } + if ctxt.GOOS == "android" && l[n-1] == "linux" { + return true + } + return l[n-1] == ctxt.GOOS + } + if n >= 1 && knownArch[l[n-1]] { + if allTags != nil { + allTags[l[n-1]] = true + } + return l[n-1] == ctxt.GOARCH + } + return true +} + +var knownOS = make(map[string]bool) +var knownArch = make(map[string]bool) + +func init() { + for _, v := range strings.Fields(goosList) { + knownOS[v] = true + } + for _, v := range strings.Fields(goarchList) { + knownArch[v] = true + } +} + +// ToolDir is the directory containing build tools. +var ToolDir = filepath.Join(runtime.GOROOT(), "pkg/tool/"+runtime.GOOS+"_"+runtime.GOARCH) + +// IsLocalImport reports whether the import path is +// a local import path, like ".", "..", "./foo", or "../foo". +func IsLocalImport(path string) bool { + return path == "." || path == ".." || + strings.HasPrefix(path, "./") || strings.HasPrefix(path, "../") +} + +// ArchChar returns "?" and an error. +// In earlier versions of Go, the returned string was used to derive +// the compiler and linker tool names, the default object file suffix, +// and the default linker output name. As of Go 1.5, those strings +// no longer vary by architecture; they are compile, link, .o, and a.out, respectively. +func ArchChar(goarch string) (string, error) { + return "?", errors.New("architecture letter no longer used") +} diff --git a/vendor/github.com/magefile/mage/build/build_test.go b/vendor/github.com/magefile/mage/build/build_test.go new file mode 100644 index 0000000..89b2c09 --- /dev/null +++ b/vendor/github.com/magefile/mage/build/build_test.go @@ -0,0 +1,446 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
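Illustrative sketch, not part of the diff: the vendored build package above selects files via GOOS/GOARCH filename suffixes, "+build" lines, and the RequiredTags field this fork adds, all reachable through the exported Context.MatchFile API. This is a minimal, hypothetical usage only; it assumes the package imports as github.com/magefile/mage/build (the path TestLocalDirectory below expects) and stubs OpenFile/JoinPath the same way matchFileTests does, and the plain.go/tagged.go names are invented for the example.

package main

import (
    "fmt"
    "io"
    "io/ioutil"
    "strings"

    build "github.com/magefile/mage/build"
)

func main() {
    ctxt := build.Default
    ctxt.GOOS, ctxt.GOARCH = "plan9", "arm" // same pairing as ctxtP9 in the tests
    // Serve file contents from memory so MatchFile never touches disk.
    files := map[string]string{
        "foo_arm.go":    "package main\n",                    // arch suffix matches GOARCH
        "foo_darwin.go": "package main\n",                    // wrong GOOS suffix
        "foo1.go":       "// +build linux\n\npackage main\n", // +build line does not match
    }
    ctxt.JoinPath = func(elem ...string) string { return strings.Join(elem, "/") }
    ctxt.OpenFile = func(path string) (io.ReadCloser, error) {
        name := path[strings.LastIndex(path, "/")+1:]
        return ioutil.NopCloser(strings.NewReader(files[name])), nil
    }
    for _, name := range []string{"foo_arm.go", "foo_darwin.go", "foo1.go"} {
        ok, _ := ctxt.MatchFile("x", name)
        fmt.Println(name, ok) // foo_arm.go true, foo_darwin.go false, foo1.go false
    }

    // RequiredTags (the addition in this fork) makes a tag mandatory: mirroring
    // TestRequiredTags below, a file is selected only when a +build line both
    // matches and lists every required tag.
    req := ctxt
    req.BuildTags = []string{"req"}
    req.RequiredTags = []string{"req"}
    files["plain.go"] = "package main\n"
    files["tagged.go"] = "// +build req\n\npackage main\n"
    for _, name := range []string{"plain.go", "tagged.go"} {
        ok, _ := req.MatchFile("x", name)
        fmt.Println(name, ok) // plain.go false, tagged.go true
    }
}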
+ +package build + +import ( + "io" + "os" + "path/filepath" + "reflect" + "runtime" + "strings" + "testing" +) + +func TestMatch(t *testing.T) { + ctxt := Default + what := "default" + match := func(tag string, want map[string]bool) { + m := make(map[string]bool) + if !ctxt.match(tag, m) { + t.Errorf("%s context should match %s, does not", what, tag) + } + if !reflect.DeepEqual(m, want) { + t.Errorf("%s tags = %v, want %v", tag, m, want) + } + } + nomatch := func(tag string, want map[string]bool) { + m := make(map[string]bool) + if ctxt.match(tag, m) { + t.Errorf("%s context should NOT match %s, does", what, tag) + } + if !reflect.DeepEqual(m, want) { + t.Errorf("%s tags = %v, want %v", tag, m, want) + } + } + + match(runtime.GOOS+","+runtime.GOARCH, map[string]bool{runtime.GOOS: true, runtime.GOARCH: true}) + match(runtime.GOOS+","+runtime.GOARCH+",!foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true}) + nomatch(runtime.GOOS+","+runtime.GOARCH+",foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true}) + + what = "modified" + ctxt.BuildTags = []string{"foo"} + match(runtime.GOOS+","+runtime.GOARCH, map[string]bool{runtime.GOOS: true, runtime.GOARCH: true}) + match(runtime.GOOS+","+runtime.GOARCH+",foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true}) + nomatch(runtime.GOOS+","+runtime.GOARCH+",!foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true}) + match(runtime.GOOS+","+runtime.GOARCH+",!bar", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "bar": true}) + nomatch(runtime.GOOS+","+runtime.GOARCH+",bar", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "bar": true}) + nomatch("!", map[string]bool{}) +} + +func TestDotSlashImport(t *testing.T) { + p, err := ImportDir("testdata/other", 0) + if err != nil { + t.Fatal(err) + } + if len(p.Imports) != 1 || p.Imports[0] != "./file" { + t.Fatalf("testdata/other: Imports=%v, want [./file]", p.Imports) + } + + p1, err := Import("./file", "testdata/other", 0) + if err != nil { + t.Fatal(err) + } + if p1.Name != "file" { + t.Fatalf("./file: Name=%q, want %q", p1.Name, "file") + } + dir := filepath.Clean("testdata/other/file") // Clean to use \ on Windows + if p1.Dir != dir { + t.Fatalf("./file: Dir=%q, want %q", p1.Name, dir) + } +} + +func TestEmptyImport(t *testing.T) { + p, err := Import("", Default.GOROOT, FindOnly) + if err == nil { + t.Fatal(`Import("") returned nil error.`) + } + if p == nil { + t.Fatal(`Import("") returned nil package.`) + } + if p.ImportPath != "" { + t.Fatalf("ImportPath=%q, want %q.", p.ImportPath, "") + } +} + +func TestEmptyFolderImport(t *testing.T) { + _, err := Import(".", "testdata/empty", 0) + if _, ok := err.(*NoGoError); !ok { + t.Fatal(`Import("testdata/empty") did not return NoGoError.`) + } +} + +func TestMultiplePackageImport(t *testing.T) { + _, err := Import(".", "testdata/multi", 0) + mpe, ok := err.(*MultiplePackageError) + if !ok { + t.Fatal(`Import("testdata/multi") did not return MultiplePackageError.`) + } + want := &MultiplePackageError{ + Dir: filepath.FromSlash("testdata/multi"), + Packages: []string{"main", "test_package"}, + Files: []string{"file.go", "file_appengine.go"}, + } + if !reflect.DeepEqual(mpe, want) { + t.Errorf("got %#v; want %#v", mpe, want) + } +} + +func TestLocalDirectory(t *testing.T) { + if runtime.GOOS == "darwin" { + switch runtime.GOARCH { + case "arm", "arm64": + t.Skipf("skipping on %s/%s, no valid GOROOT", runtime.GOOS, runtime.GOARCH) + } + } + + cwd, err := 
os.Getwd() + if err != nil { + t.Fatal(err) + } + + p, err := ImportDir(cwd, 0) + if err != nil { + t.Fatal(err) + } + if p.ImportPath != "github.com/magefile/mage/build" { + t.Fatalf("ImportPath=%q, want %q", p.ImportPath, "github.com/magefile/mage/build") + } +} + +func TestShouldBuild(t *testing.T) { + const file1 = "// +build tag1\n\n" + + "package main\n" + want1 := map[string]bool{"tag1": true} + + const file2 = "// +build cgo\n\n" + + "// This package implements parsing of tags like\n" + + "// +build tag1\n" + + "package build" + want2 := map[string]bool{"cgo": true} + + const file3 = "// Copyright The Go Authors.\n\n" + + "package build\n\n" + + "// shouldBuild checks tags given by lines of the form\n" + + "// +build tag\n" + + "func shouldBuild(content []byte)\n" + want3 := map[string]bool{} + + ctx := &Context{BuildTags: []string{"tag1"}} + m := map[string]bool{} + if !ctx.shouldBuild([]byte(file1), m, nil) { + t.Errorf("shouldBuild(file1) = false, want true") + } + if !reflect.DeepEqual(m, want1) { + t.Errorf("shouldBuild(file1) tags = %v, want %v", m, want1) + } + + m = map[string]bool{} + if ctx.shouldBuild([]byte(file2), m, nil) { + t.Errorf("shouldBuild(file2) = true, want false") + } + if !reflect.DeepEqual(m, want2) { + t.Errorf("shouldBuild(file2) tags = %v, want %v", m, want2) + } + + m = map[string]bool{} + ctx = &Context{BuildTags: nil} + if !ctx.shouldBuild([]byte(file3), m, nil) { + t.Errorf("shouldBuild(file3) = false, want true") + } + if !reflect.DeepEqual(m, want3) { + t.Errorf("shouldBuild(file3) tags = %v, want %v", m, want3) + } +} + +func TestRequiredTags(t *testing.T) { + tests := []struct { + name string + file string + should bool + }{ + { + "no req tag", + "package main", + false, + }, + { + "has req tag", + `// +build req + + package main`, + true, + }, + { + "OR with req", + `// +build no req + + package main`, + true, + }, + } + + ctx := &Context{ + BuildTags: []string{"req"}, + RequiredTags: []string{"req"}, + } + for _, tst := range tests { + t.Run(tst.name, func(t *testing.T) { + if tst.should != ctx.shouldBuild([]byte(tst.file), nil, nil) { + t.Errorf("shouldBuild = %v, want %v", !tst.should, tst.should) + } + }) + } +} + +type readNopCloser struct { + io.Reader +} + +func (r readNopCloser) Close() error { + return nil +} + +var ( + ctxtP9 = Context{GOARCH: "arm", GOOS: "plan9"} + ctxtAndroid = Context{GOARCH: "arm", GOOS: "android"} +) + +var matchFileTests = []struct { + ctxt Context + name string + data string + match bool +}{ + {ctxtP9, "foo_arm.go", "", true}, + {ctxtP9, "foo1_arm.go", "// +build linux\n\npackage main\n", false}, + {ctxtP9, "foo_darwin.go", "", false}, + {ctxtP9, "foo.go", "", true}, + {ctxtP9, "foo1.go", "// +build linux\n\npackage main\n", false}, + {ctxtP9, "foo.badsuffix", "", false}, + {ctxtAndroid, "foo_linux.go", "", true}, + {ctxtAndroid, "foo_android.go", "", true}, + {ctxtAndroid, "foo_plan9.go", "", false}, + {ctxtAndroid, "android.go", "", true}, + {ctxtAndroid, "plan9.go", "", true}, + {ctxtAndroid, "plan9_test.go", "", true}, + {ctxtAndroid, "arm.s", "", true}, + {ctxtAndroid, "amd64.s", "", true}, +} + +func TestMatchFile(t *testing.T) { + for _, tt := range matchFileTests { + ctxt := tt.ctxt + ctxt.OpenFile = func(path string) (r io.ReadCloser, err error) { + if path != "x+"+tt.name { + t.Fatalf("OpenFile asked for %q, expected %q", path, "x+"+tt.name) + } + return &readNopCloser{strings.NewReader(tt.data)}, nil + } + ctxt.JoinPath = func(elem ...string) string { + return strings.Join(elem, "+") + } + match, 
err := ctxt.MatchFile("x", tt.name) + if match != tt.match || err != nil { + t.Fatalf("MatchFile(%q) = %v, %v, want %v, nil", tt.name, match, err, tt.match) + } + } +} + +func TestImportCmd(t *testing.T) { + if runtime.GOOS == "darwin" { + switch runtime.GOARCH { + case "arm", "arm64": + t.Skipf("skipping on %s/%s, no valid GOROOT", runtime.GOOS, runtime.GOARCH) + } + } + + p, err := Import("cmd/internal/objfile", "", 0) + if err != nil { + t.Fatal(err) + } + if !strings.HasSuffix(filepath.ToSlash(p.Dir), "src/cmd/internal/objfile") { + t.Fatalf("Import cmd/internal/objfile returned Dir=%q, want %q", filepath.ToSlash(p.Dir), ".../src/cmd/internal/objfile") + } +} + +var ( + expandSrcDirPath = filepath.Join(string(filepath.Separator)+"projects", "src", "add") +) + +var expandSrcDirTests = []struct { + input, expected string +}{ + {"-L ${SRCDIR}/libs -ladd", "-L /projects/src/add/libs -ladd"}, + {"${SRCDIR}/add_linux_386.a -pthread -lstdc++", "/projects/src/add/add_linux_386.a -pthread -lstdc++"}, + {"Nothing to expand here!", "Nothing to expand here!"}, + {"$", "$"}, + {"$$", "$$"}, + {"${", "${"}, + {"$}", "$}"}, + {"$FOO ${BAR}", "$FOO ${BAR}"}, + {"Find me the $SRCDIRECTORY.", "Find me the $SRCDIRECTORY."}, + {"$SRCDIR is missing braces", "$SRCDIR is missing braces"}, +} + +func TestExpandSrcDir(t *testing.T) { + for _, test := range expandSrcDirTests { + output, _ := expandSrcDir(test.input, expandSrcDirPath) + if output != test.expected { + t.Errorf("%q expands to %q with SRCDIR=%q when %q is expected", test.input, output, expandSrcDirPath, test.expected) + } else { + t.Logf("%q expands to %q with SRCDIR=%q", test.input, output, expandSrcDirPath) + } + } +} + +func TestShellSafety(t *testing.T) { + tests := []struct { + input, srcdir, expected string + result bool + }{ + {"-I${SRCDIR}/../include", "/projects/src/issue 11868", "-I/projects/src/issue 11868/../include", true}, + {"-I${SRCDIR}", "wtf$@%", "-Iwtf$@%", true}, + {"-X${SRCDIR}/1,${SRCDIR}/2", "/projects/src/issue 11868", "-X/projects/src/issue 11868/1,/projects/src/issue 11868/2", true}, + {"-I/tmp -I/tmp", "/tmp2", "-I/tmp -I/tmp", true}, + {"-I/tmp", "/tmp/[0]", "-I/tmp", true}, + {"-I${SRCDIR}/dir", "/tmp/[0]", "-I/tmp/[0]/dir", false}, + {"-I${SRCDIR}/dir", "/tmp/go go", "-I/tmp/go go/dir", true}, + {"-I${SRCDIR}/dir dir", "/tmp/go", "-I/tmp/go/dir dir", true}, + } + for _, test := range tests { + output, ok := expandSrcDir(test.input, test.srcdir) + if ok != test.result { + t.Errorf("Expected %t while %q expands to %q with SRCDIR=%q; got %t", test.result, test.input, output, test.srcdir, ok) + } + if output != test.expected { + t.Errorf("Expected %q while %q expands with SRCDIR=%q; got %q", test.expected, test.input, test.srcdir, output) + } + } +} + +// Want to get a "cannot find package" error when directory for package does not exist. +// There should be valid partial information in the returned non-nil *Package. 
+func TestImportDirNotExist(t *testing.T) { + MustHaveGoBuild(t) // really must just have source + ctxt := Default + ctxt.GOPATH = "" + + tests := []struct { + label string + path, srcDir string + mode ImportMode + }{ + {"Import(full, 0)", "go/build/doesnotexist", "", 0}, + {"Import(local, 0)", "./doesnotexist", filepath.Join(ctxt.GOROOT, "src/go/build"), 0}, + {"Import(full, FindOnly)", "go/build/doesnotexist", "", FindOnly}, + {"Import(local, FindOnly)", "./doesnotexist", filepath.Join(ctxt.GOROOT, "src/go/build"), FindOnly}, + } + for _, test := range tests { + p, err := ctxt.Import(test.path, test.srcDir, test.mode) + if err == nil || !strings.HasPrefix(err.Error(), "cannot find package") { + t.Errorf(`%s got error: %q, want "cannot find package" error`, test.label, err) + } + // If an error occurs, build.Import is documented to return + // a non-nil *Package containing partial information. + if p == nil { + t.Fatalf(`%s got nil p, want non-nil *Package`, test.label) + } + // Verify partial information in p. + if p.ImportPath != "go/build/doesnotexist" { + t.Errorf(`%s got p.ImportPath: %q, want "go/build/doesnotexist"`, test.label, p.ImportPath) + } + } +} + +func TestImportVendor(t *testing.T) { + MustHaveGoBuild(t) // really must just have source + ctxt := Default + ctxt.GOPATH = "" + p, err := ctxt.Import("golang_org/x/net/http2/hpack", filepath.Join(ctxt.GOROOT, "src/net/http"), 0) + if err != nil { + t.Fatalf("cannot find vendored golang_org/x/net/http2/hpack from net/http directory: %v", err) + } + want := "vendor/golang_org/x/net/http2/hpack" + if p.ImportPath != want { + t.Fatalf("Import succeeded but found %q, want %q", p.ImportPath, want) + } +} + +func TestImportVendorFailure(t *testing.T) { + MustHaveGoBuild(t) // really must just have source + ctxt := Default + ctxt.GOPATH = "" + p, err := ctxt.Import("x.com/y/z", filepath.Join(ctxt.GOROOT, "src/net/http"), 0) + if err == nil { + t.Fatalf("found made-up package x.com/y/z in %s", p.Dir) + } + + e := err.Error() + if !strings.Contains(e, " (vendor tree)") { + t.Fatalf("error on failed import does not mention GOROOT/src/vendor directory:\n%s", e) + } +} + +func TestImportVendorParentFailure(t *testing.T) { + MustHaveGoBuild(t) // really must just have source + ctxt := Default + ctxt.GOPATH = "" + // This import should fail because the vendor/golang.org/x/net/http2 directory has no source code. + p, err := ctxt.Import("golang_org/x/net/http2", filepath.Join(ctxt.GOROOT, "src/net/http"), 0) + if err == nil { + t.Fatalf("found empty parent in %s", p.Dir) + } + if p != nil && p.Dir != "" { + t.Fatalf("decided to use %s", p.Dir) + } + e := err.Error() + if !strings.Contains(e, " (vendor tree)") { + t.Fatalf("error on failed import does not mention GOROOT/src/vendor directory:\n%s", e) + } +} + +// HasGoBuild reports whether the current system can build programs with ``go build'' +// and then run them with os.StartProcess or exec.Command. +func HasGoBuild() bool { + switch runtime.GOOS { + case "android", "nacl": + return false + case "darwin": + if strings.HasPrefix(runtime.GOARCH, "arm") { + return false + } + } + return true +} + +// MustHaveGoBuild checks that the current system can build programs with ``go build'' +// and then run them with os.StartProcess or exec.Command. +// If not, MustHaveGoBuild calls t.Skip with an explanation. 
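+// A sketch of typical usage (TestSomething is a hypothetical name): call it
+// before doing any work that needs GOROOT sources or the go tool, as the
+// import tests above do:
+//
+//	func TestSomething(t *testing.T) {
+//		MustHaveGoBuild(t) // skips on android, nacl and darwin/arm*
+//		// ... rest of the test ...
+//	}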
+func MustHaveGoBuild(t *testing.T) { + if !HasGoBuild() { + t.Skipf("skipping test: 'go build' not available on %s/%s", runtime.GOOS, runtime.GOARCH) + } +} diff --git a/vendor/github.com/magefile/mage/build/deps_test.go b/vendor/github.com/magefile/mage/build/deps_test.go new file mode 100644 index 0000000..1e64d0c --- /dev/null +++ b/vendor/github.com/magefile/mage/build/deps_test.go @@ -0,0 +1,556 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file exercises the import parser but also checks that +// some low-level packages do not have new dependencies added. + +package build + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "sort" + "strconv" + "strings" +) + +// pkgDeps defines the expected dependencies between packages in +// the Go source tree. It is a statement of policy. +// Changes should not be made to this map without prior discussion. +// +// The map contains two kinds of entries: +// 1) Lower-case keys are standard import paths and list the +// allowed imports in that package. +// 2) Upper-case keys define aliases for package sets, which can then +// be used as dependencies by other rules. +// +// DO NOT CHANGE THIS DATA TO FIX BUILDS. +// +var pkgDeps = map[string][]string{ + // L0 is the lowest level, core, nearly unavoidable packages. + "errors": {}, + "io": {"errors", "sync"}, + "runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys"}, + "runtime/internal/sys": {}, + "runtime/internal/atomic": {"unsafe", "runtime/internal/sys"}, + "internal/race": {"runtime", "unsafe"}, + "sync": {"internal/race", "runtime", "sync/atomic", "unsafe"}, + "sync/atomic": {"unsafe"}, + "unsafe": {}, + "internal/cpu": {"runtime"}, + + "L0": { + "errors", + "io", + "runtime", + "runtime/internal/atomic", + "sync", + "sync/atomic", + "unsafe", + "internal/cpu", + }, + + // L1 adds simple functions and strings processing, + // but not Unicode tables. + "math": {"internal/cpu", "unsafe"}, + "math/bits": {}, + "math/cmplx": {"math"}, + "math/rand": {"L0", "math"}, + "strconv": {"L0", "unicode/utf8", "math"}, + "unicode/utf16": {}, + "unicode/utf8": {}, + + "L1": { + "L0", + "math", + "math/bits", + "math/cmplx", + "math/rand", + "sort", + "strconv", + "unicode/utf16", + "unicode/utf8", + }, + + // L2 adds Unicode and strings processing. + "bufio": {"L0", "unicode/utf8", "bytes"}, + "bytes": {"L0", "unicode", "unicode/utf8"}, + "path": {"L0", "unicode/utf8", "strings"}, + "strings": {"L0", "unicode", "unicode/utf8"}, + "unicode": {}, + + "L2": { + "L1", + "bufio", + "bytes", + "path", + "strings", + "unicode", + }, + + // L3 adds reflection and some basic utility packages + // and interface definitions, but nothing that makes + // system calls. 
+ "crypto": {"L2", "hash"}, // interfaces + "crypto/cipher": {"L2", "crypto/subtle"}, + "crypto/subtle": {}, + "encoding/base32": {"L2"}, + "encoding/base64": {"L2"}, + "encoding/binary": {"L2", "reflect"}, + "hash": {"L2"}, // interfaces + "hash/adler32": {"L2", "hash"}, + "hash/crc32": {"L2", "hash"}, + "hash/crc64": {"L2", "hash"}, + "hash/fnv": {"L2", "hash"}, + "image": {"L2", "image/color"}, // interfaces + "image/color": {"L2"}, // interfaces + "image/color/palette": {"L2", "image/color"}, + "reflect": {"L2"}, + "sort": {"reflect"}, + + "L3": { + "L2", + "crypto", + "crypto/cipher", + "crypto/internal/cipherhw", + "crypto/subtle", + "encoding/base32", + "encoding/base64", + "encoding/binary", + "hash", + "hash/adler32", + "hash/crc32", + "hash/crc64", + "hash/fnv", + "image", + "image/color", + "image/color/palette", + "reflect", + }, + + // End of linear dependency definitions. + + // Operating system access. + "syscall": {"L0", "internal/race", "internal/syscall/windows/sysdll", "unicode/utf16"}, + "internal/syscall/unix": {"L0", "syscall"}, + "internal/syscall/windows": {"L0", "syscall", "internal/syscall/windows/sysdll"}, + "internal/syscall/windows/registry": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"}, + "time": { + // "L0" without the "io" package: + "errors", + "runtime", + "runtime/internal/atomic", + "sync", + "sync/atomic", + "unsafe", + // Other time dependencies: + "internal/syscall/windows/registry", + "syscall", + }, + + "internal/poll": {"L0", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8"}, + "os": {"L1", "os", "syscall", "time", "internal/poll", "internal/syscall/windows"}, + "path/filepath": {"L2", "os", "syscall"}, + "io/ioutil": {"L2", "os", "path/filepath", "time"}, + "os/exec": {"L2", "os", "context", "path/filepath", "syscall"}, + "os/signal": {"L2", "os", "syscall"}, + + // OS enables basic operating system functionality, + // but not direct use of package syscall, nor os/signal. + "OS": { + "io/ioutil", + "os", + "os/exec", + "path/filepath", + "time", + }, + + // Formatted I/O: few dependencies (L1) but we must add reflect. + "fmt": {"L1", "os", "reflect"}, + "log": {"L1", "os", "fmt", "time"}, + + // Packages used by testing must be low-level (L2+fmt). + "regexp": {"L2", "regexp/syntax"}, + "regexp/syntax": {"L2"}, + "runtime/debug": {"L2", "fmt", "io/ioutil", "os", "time"}, + "runtime/pprof": {"L2", "compress/gzip", "context", "encoding/binary", "fmt", "io/ioutil", "os", "text/tabwriter", "time"}, + "runtime/trace": {"L0"}, + "text/tabwriter": {"L2"}, + + "testing": {"L2", "flag", "fmt", "internal/race", "os", "runtime/debug", "runtime/pprof", "runtime/trace", "time"}, + "testing/iotest": {"L2", "log"}, + "testing/quick": {"L2", "flag", "fmt", "reflect", "time"}, + "internal/testenv": {"L2", "OS", "flag", "testing", "syscall"}, + + // L4 is defined as L3+fmt+log+time, because in general once + // you're using L3 packages, use of fmt, log, or time is not a big deal. + "L4": { + "L3", + "fmt", + "log", + "time", + }, + + // Go parser. 
+ "go/ast": {"L4", "OS", "go/scanner", "go/token"}, + "go/doc": {"L4", "go/ast", "go/token", "regexp", "text/template"}, + "go/parser": {"L4", "OS", "go/ast", "go/scanner", "go/token"}, + "go/printer": {"L4", "OS", "go/ast", "go/scanner", "go/token", "text/tabwriter"}, + "go/scanner": {"L4", "OS", "go/token"}, + "go/token": {"L4"}, + + "GOPARSER": { + "go/ast", + "go/doc", + "go/parser", + "go/printer", + "go/scanner", + "go/token", + }, + + "go/format": {"L4", "GOPARSER", "internal/format"}, + "internal/format": {"L4", "GOPARSER"}, + + // Go type checking. + "go/constant": {"L4", "go/token", "math/big"}, + "go/importer": {"L4", "go/build", "go/internal/gccgoimporter", "go/internal/gcimporter", "go/internal/srcimporter", "go/token", "go/types"}, + "go/internal/gcimporter": {"L4", "OS", "go/build", "go/constant", "go/token", "go/types", "text/scanner"}, + "go/internal/gccgoimporter": {"L4", "OS", "debug/elf", "go/constant", "go/token", "go/types", "text/scanner"}, + "go/internal/srcimporter": {"L4", "fmt", "go/ast", "go/build", "go/parser", "go/token", "go/types", "path/filepath"}, + "go/types": {"L4", "GOPARSER", "container/heap", "go/constant"}, + + // One of a kind. + "archive/tar": {"L4", "OS", "syscall"}, + "archive/zip": {"L4", "OS", "compress/flate"}, + "container/heap": {"sort"}, + "compress/bzip2": {"L4"}, + "compress/flate": {"L4"}, + "compress/gzip": {"L4", "compress/flate"}, + "compress/lzw": {"L4"}, + "compress/zlib": {"L4", "compress/flate"}, + "context": {"errors", "fmt", "reflect", "sync", "time"}, + "database/sql": {"L4", "container/list", "context", "database/sql/driver", "database/sql/internal"}, + "database/sql/driver": {"L4", "context", "time", "database/sql/internal"}, + "debug/dwarf": {"L4"}, + "debug/elf": {"L4", "OS", "debug/dwarf", "compress/zlib"}, + "debug/gosym": {"L4"}, + "debug/macho": {"L4", "OS", "debug/dwarf"}, + "debug/pe": {"L4", "OS", "debug/dwarf"}, + "debug/plan9obj": {"L4", "OS"}, + "encoding": {"L4"}, + "encoding/ascii85": {"L4"}, + "encoding/asn1": {"L4", "math/big"}, + "encoding/csv": {"L4"}, + "encoding/gob": {"L4", "OS", "encoding"}, + "encoding/hex": {"L4"}, + "encoding/json": {"L4", "encoding"}, + "encoding/pem": {"L4"}, + "encoding/xml": {"L4", "encoding"}, + "flag": {"L4", "OS"}, + "go/build": {"L4", "OS", "GOPARSER"}, + "html": {"L4"}, + "image/draw": {"L4", "image/internal/imageutil"}, + "image/gif": {"L4", "compress/lzw", "image/color/palette", "image/draw"}, + "image/internal/imageutil": {"L4"}, + "image/jpeg": {"L4", "image/internal/imageutil"}, + "image/png": {"L4", "compress/zlib"}, + "index/suffixarray": {"L4", "regexp"}, + "internal/singleflight": {"sync"}, + "internal/trace": {"L4", "OS"}, + "math/big": {"L4"}, + "mime": {"L4", "OS", "syscall", "internal/syscall/windows/registry"}, + "mime/quotedprintable": {"L4"}, + "net/internal/socktest": {"L4", "OS", "syscall"}, + "net/url": {"L4"}, + "plugin": {"L0", "OS", "CGO"}, + "runtime/pprof/internal/profile": {"L4", "OS", "compress/gzip", "regexp"}, + "testing/internal/testdeps": {"L4", "runtime/pprof", "regexp"}, + "text/scanner": {"L4", "OS"}, + "text/template/parse": {"L4"}, + + "html/template": { + "L4", "OS", "encoding/json", "html", "text/template", + "text/template/parse", + }, + "text/template": { + "L4", "OS", "net/url", "text/template/parse", + }, + + // Cgo. + // If you add a dependency on CGO, you must add the package to + // cgoPackages in cmd/dist/test.go. 
+ "runtime/cgo": {"L0", "C"}, + "CGO": {"C", "runtime/cgo"}, + + // Fake entry to satisfy the pseudo-import "C" + // that shows up in programs that use cgo. + "C": {}, + + // Race detector/MSan uses cgo. + "runtime/race": {"C"}, + "runtime/msan": {"C"}, + + // Plan 9 alone needs io/ioutil and os. + "os/user": {"L4", "CGO", "io/ioutil", "os", "syscall"}, + + // Basic networking. + // Because net must be used by any package that wants to + // do networking portably, it must have a small dependency set: just L0+basic os. + "net": { + "L0", "CGO", + "context", "math/rand", "os", "reflect", "sort", "syscall", "time", + "internal/nettrace", "internal/poll", + "internal/syscall/windows", "internal/singleflight", "internal/race", + "golang_org/x/net/lif", "golang_org/x/net/route", + }, + + // NET enables use of basic network-related packages. + "NET": { + "net", + "mime", + "net/textproto", + "net/url", + }, + + // Uses of networking. + "log/syslog": {"L4", "OS", "net"}, + "net/mail": {"L4", "NET", "OS", "mime"}, + "net/textproto": {"L4", "OS", "net"}, + + // Core crypto. + "crypto/aes": {"L3"}, + "crypto/des": {"L3"}, + "crypto/hmac": {"L3"}, + "crypto/md5": {"L3"}, + "crypto/rc4": {"L3"}, + "crypto/sha1": {"L3"}, + "crypto/sha256": {"L3"}, + "crypto/sha512": {"L3"}, + + "CRYPTO": { + "crypto/aes", + "crypto/des", + "crypto/hmac", + "crypto/md5", + "crypto/rc4", + "crypto/sha1", + "crypto/sha256", + "crypto/sha512", + "golang_org/x/crypto/chacha20poly1305", + "golang_org/x/crypto/curve25519", + "golang_org/x/crypto/poly1305", + }, + + // Random byte, number generation. + // This would be part of core crypto except that it imports + // math/big, which imports fmt. + "crypto/rand": {"L4", "CRYPTO", "OS", "math/big", "syscall", "internal/syscall/unix"}, + + // Mathematical crypto: dependencies on fmt (L4) and math/big. + // We could avoid some of the fmt, but math/big imports fmt anyway. + "crypto/dsa": {"L4", "CRYPTO", "math/big"}, + "crypto/ecdsa": {"L4", "CRYPTO", "crypto/elliptic", "math/big", "encoding/asn1"}, + "crypto/elliptic": {"L4", "CRYPTO", "math/big"}, + "crypto/rsa": {"L4", "CRYPTO", "crypto/rand", "math/big"}, + + "CRYPTO-MATH": { + "CRYPTO", + "crypto/dsa", + "crypto/ecdsa", + "crypto/elliptic", + "crypto/rand", + "crypto/rsa", + "encoding/asn1", + "math/big", + }, + + // SSL/TLS. + "crypto/tls": { + "L4", "CRYPTO-MATH", "OS", + "container/list", "crypto/x509", "encoding/pem", "net", "syscall", + }, + "crypto/x509": { + "L4", "CRYPTO-MATH", "OS", "CGO", + "crypto/x509/pkix", "encoding/pem", "encoding/hex", "net", "os/user", "syscall", + }, + "crypto/x509/pkix": {"L4", "CRYPTO-MATH"}, + + // Simple net+crypto-aware packages. + "mime/multipart": {"L4", "OS", "mime", "crypto/rand", "net/textproto", "mime/quotedprintable"}, + "net/smtp": {"L4", "CRYPTO", "NET", "crypto/tls"}, + + // HTTP, kingpin of dependencies. + "net/http": { + "L4", "NET", "OS", + "compress/gzip", + "container/list", + "context", + "crypto/rand", + "crypto/tls", + "golang_org/x/net/http2/hpack", + "golang_org/x/net/idna", + "golang_org/x/net/lex/httplex", + "golang_org/x/net/proxy", + "golang_org/x/text/unicode/norm", + "golang_org/x/text/width", + "internal/nettrace", + "mime/multipart", + "net/http/httptrace", + "net/http/internal", + "runtime/debug", + }, + "net/http/internal": {"L4"}, + "net/http/httptrace": {"context", "crypto/tls", "internal/nettrace", "net", "reflect", "time"}, + + // HTTP-using packages. 
+ "expvar": {"L4", "OS", "encoding/json", "net/http"}, + "net/http/cgi": {"L4", "NET", "OS", "crypto/tls", "net/http", "regexp"}, + "net/http/cookiejar": {"L4", "NET", "net/http"}, + "net/http/fcgi": {"L4", "NET", "OS", "context", "net/http", "net/http/cgi"}, + "net/http/httptest": {"L4", "NET", "OS", "crypto/tls", "flag", "net/http", "net/http/internal", "crypto/x509"}, + "net/http/httputil": {"L4", "NET", "OS", "context", "net/http", "net/http/internal"}, + "net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof", "runtime/trace"}, + "net/rpc": {"L4", "NET", "encoding/gob", "html/template", "net/http"}, + "net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"}, +} + +// isMacro reports whether p is a package dependency macro +// (uppercase name). +func isMacro(p string) bool { + return 'A' <= p[0] && p[0] <= 'Z' +} + +func allowed(pkg string) map[string]bool { + m := map[string]bool{} + var allow func(string) + allow = func(p string) { + if m[p] { + return + } + m[p] = true // set even for macros, to avoid loop on cycle + + // Upper-case names are macro-expanded. + if isMacro(p) { + for _, pp := range pkgDeps[p] { + allow(pp) + } + } + } + for _, pp := range pkgDeps[pkg] { + allow(pp) + } + return m +} + +// listStdPkgs returns the same list of packages as "go list std". +func listStdPkgs(goroot string) ([]string, error) { + // Based on cmd/go's matchPackages function. + var pkgs []string + + src := filepath.Join(goroot, "src") + string(filepath.Separator) + walkFn := func(path string, fi os.FileInfo, err error) error { + if err != nil || !fi.IsDir() || path == src { + return nil + } + + base := filepath.Base(path) + if strings.HasPrefix(base, ".") || strings.HasPrefix(base, "_") || base == "testdata" { + return filepath.SkipDir + } + + name := filepath.ToSlash(path[len(src):]) + if name == "builtin" || name == "cmd" || strings.Contains(name, "golang_org") { + return filepath.SkipDir + } + + pkgs = append(pkgs, name) + return nil + } + if err := filepath.Walk(src, walkFn); err != nil { + return nil, err + } + return pkgs, nil +} + +// This test does not function well in travis under different go versions for some reason. +// +// func TestDependencies(t *testing.T) { +// iOS := runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64") +// if runtime.GOOS == "nacl" || iOS { +// // Tests run in a limited file system and we do not +// // provide access to every source file. 
+// t.Skipf("skipping on %s/%s, missing full GOROOT", runtime.GOOS, runtime.GOARCH) +// } + +// ctxt := Default +// all, err := listStdPkgs(ctxt.GOROOT) +// if err != nil { +// t.Fatal(err) +// } +// sort.Strings(all) + +// for _, pkg := range all { +// imports, err := findImports(pkg) +// if err != nil { +// t.Error(err) +// continue +// } +// ok := allowed(pkg) +// var bad []string +// for _, imp := range imports { +// if !ok[imp] { +// bad = append(bad, imp) +// } +// } +// if bad != nil { +// t.Errorf("unexpected dependency: %s imports %v", pkg, bad) +// } +// } +// } + +var buildIgnore = []byte("\n// +build ignore") + +func findImports(pkg string) ([]string, error) { + dir := filepath.Join(Default.GOROOT, "src", pkg) + files, err := ioutil.ReadDir(dir) + if err != nil { + return nil, err + } + var imports []string + var haveImport = map[string]bool{} + for _, file := range files { + name := file.Name() + if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") { + continue + } + f, err := os.Open(filepath.Join(dir, name)) + if err != nil { + return nil, err + } + var imp []string + data, err := readImports(f, false, &imp) + f.Close() + if err != nil { + return nil, fmt.Errorf("reading %v: %v", name, err) + } + if bytes.Contains(data, buildIgnore) { + continue + } + for _, quoted := range imp { + path, err := strconv.Unquote(quoted) + if err != nil { + continue + } + if !haveImport[path] { + haveImport[path] = true + imports = append(imports, path) + } + } + } + sort.Strings(imports) + return imports, nil +} diff --git a/vendor/github.com/magefile/mage/build/doc.go b/vendor/github.com/magefile/mage/build/doc.go new file mode 100644 index 0000000..422e1a5 --- /dev/null +++ b/vendor/github.com/magefile/mage/build/doc.go @@ -0,0 +1,166 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package build gathers information about Go packages. +// +// Go Path +// +// The Go path is a list of directory trees containing Go source code. +// It is consulted to resolve imports that cannot be found in the standard +// Go tree. The default path is the value of the GOPATH environment +// variable, interpreted as a path list appropriate to the operating system +// (on Unix, the variable is a colon-separated string; +// on Windows, a semicolon-separated string; +// on Plan 9, a list). +// +// Each directory listed in the Go path must have a prescribed structure: +// +// The src/ directory holds source code. The path below 'src' determines +// the import path or executable name. +// +// The pkg/ directory holds installed package objects. +// As in the Go tree, each target operating system and +// architecture pair has its own subdirectory of pkg +// (pkg/GOOS_GOARCH). +// +// If DIR is a directory listed in the Go path, a package with +// source in DIR/src/foo/bar can be imported as "foo/bar" and +// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a" +// (or, for gccgo, "DIR/pkg/gccgo/foo/libbar.a"). +// +// The bin/ directory holds compiled commands. +// Each command is named for its source directory, but only +// using the final element, not the entire path. That is, the +// command with source in DIR/src/foo/quux is installed into +// DIR/bin/quux, not DIR/bin/foo/quux. The foo/ is stripped +// so that you can add DIR/bin to your PATH to get at the +// installed commands. 
+// +// Here's an example directory layout: +// +// GOPATH=/home/user/gocode +// +// /home/user/gocode/ +// src/ +// foo/ +// bar/ (go code in package bar) +// x.go +// quux/ (go code in package main) +// y.go +// bin/ +// quux (installed command) +// pkg/ +// linux_amd64/ +// foo/ +// bar.a (installed package object) +// +// Build Constraints +// +// A build constraint, also known as a build tag, is a line comment that begins +// +// // +build +// +// that lists the conditions under which a file should be included in the package. +// Constraints may appear in any kind of source file (not just Go), but +// they must appear near the top of the file, preceded +// only by blank lines and other line comments. These rules mean that in Go +// files a build constraint must appear before the package clause. +// +// To distinguish build constraints from package documentation, a series of +// build constraints must be followed by a blank line. +// +// A build constraint is evaluated as the OR of space-separated options; +// each option evaluates as the AND of its comma-separated terms; +// and each term is an alphanumeric word or, preceded by !, its negation. +// That is, the build constraint: +// +// // +build linux,386 darwin,!cgo +// +// corresponds to the boolean formula: +// +// (linux AND 386) OR (darwin AND (NOT cgo)) +// +// A file may have multiple build constraints. The overall constraint is the AND +// of the individual constraints. That is, the build constraints: +// +// // +build linux darwin +// // +build 386 +// +// corresponds to the boolean formula: +// +// (linux OR darwin) AND 386 +// +// During a particular build, the following words are satisfied: +// +// - the target operating system, as spelled by runtime.GOOS +// - the target architecture, as spelled by runtime.GOARCH +// - the compiler being used, either "gc" or "gccgo" +// - "cgo", if ctxt.CgoEnabled is true +// - "go1.1", from Go version 1.1 onward +// - "go1.2", from Go version 1.2 onward +// - "go1.3", from Go version 1.3 onward +// - "go1.4", from Go version 1.4 onward +// - "go1.5", from Go version 1.5 onward +// - "go1.6", from Go version 1.6 onward +// - "go1.7", from Go version 1.7 onward +// - "go1.8", from Go version 1.8 onward +// - "go1.9", from Go version 1.9 onward +// - any additional words listed in ctxt.BuildTags +// +// If a file's name, after stripping the extension and a possible _test suffix, +// matches any of the following patterns: +// *_GOOS +// *_GOARCH +// *_GOOS_GOARCH +// (example: source_windows_amd64.go) where GOOS and GOARCH represent +// any known operating system and architecture values respectively, then +// the file is considered to have an implicit build constraint requiring +// those terms (in addition to any explicit constraints in the file). +// +// To keep a file from being considered for the build: +// +// // +build ignore +// +// (any other unsatisfied word will work as well, but ``ignore'' is conventional.) +// +// To build a file only when using cgo, and only on Linux and OS X: +// +// // +build linux,cgo darwin,cgo +// +// Such a file is usually paired with another file implementing the +// default functionality for other systems, which in this case would +// carry the constraint: +// +// // +build !linux,!darwin !cgo +// +// Naming a file dns_windows.go will cause it to be included only when +// building the package for Windows; similarly, math_386.s will be included +// only when building the package for 32-bit x86. 
+// +// Using GOOS=android matches build tags and files as for GOOS=linux +// in addition to android tags and files. +// +// Binary-Only Packages +// +// It is possible to distribute packages in binary form without including the +// source code used for compiling the package. To do this, the package must +// be distributed with a source file not excluded by build constraints and +// containing a "//go:binary-only-package" comment. +// Like a build constraint, this comment must appear near the top of the file, +// preceded only by blank lines and other line comments and with a blank line +// following the comment, to separate it from the package documentation. +// Unlike build constraints, this comment is only recognized in non-test +// Go source files. +// +// The minimal source code for a binary-only package is therefore: +// +// //go:binary-only-package +// +// package mypkg +// +// The source code may include additional Go code. That code is never compiled +// but will be processed by tools like godoc and might be useful as end-user +// documentation. +// +package build diff --git a/vendor/github.com/magefile/mage/build/read.go b/vendor/github.com/magefile/mage/build/read.go new file mode 100644 index 0000000..29b8cdc --- /dev/null +++ b/vendor/github.com/magefile/mage/build/read.go @@ -0,0 +1,247 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package build + +import ( + "bufio" + "errors" + "io" + "unicode/utf8" +) + +type importReader struct { + b *bufio.Reader + buf []byte + peek byte + err error + eof bool + nerr int +} + +func isIdent(c byte) bool { + return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '_' || c >= utf8.RuneSelf +} + +var ( + errSyntax = errors.New("syntax error") + errNUL = errors.New("unexpected NUL in input") +) + +// syntaxError records a syntax error, but only if an I/O error has not already been recorded. +func (r *importReader) syntaxError() { + if r.err == nil { + r.err = errSyntax + } +} + +// readByte reads the next byte from the input, saves it in buf, and returns it. +// If an error occurs, readByte records the error in r.err and returns 0. +func (r *importReader) readByte() byte { + c, err := r.b.ReadByte() + if err == nil { + r.buf = append(r.buf, c) + if c == 0 { + err = errNUL + } + } + if err != nil { + if err == io.EOF { + r.eof = true + } else if r.err == nil { + r.err = err + } + c = 0 + } + return c +} + +// peekByte returns the next byte from the input reader but does not advance beyond it. +// If skipSpace is set, peekByte skips leading spaces and comments. +func (r *importReader) peekByte(skipSpace bool) byte { + if r.err != nil { + if r.nerr++; r.nerr > 10000 { + panic("go/build: import reader looping") + } + return 0 + } + + // Use r.peek as first input byte. + // Don't just return r.peek here: it might have been left by peekByte(false) + // and this might be peekByte(true). + c := r.peek + if c == 0 { + c = r.readByte() + } + for r.err == nil && !r.eof { + if skipSpace { + // For the purposes of this reader, semicolons are never necessary to + // understand the input and are treated as spaces. 
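+ // The cases below skip spaces, semicolons, and "//" or "/* */" comments;
+ // any other byte ends the skipping loop (a lone '/' that starts neither
+ // comment form records a syntax error).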
+ switch c { + case ' ', '\f', '\t', '\r', '\n', ';': + c = r.readByte() + continue + + case '/': + c = r.readByte() + if c == '/' { + for c != '\n' && r.err == nil && !r.eof { + c = r.readByte() + } + } else if c == '*' { + var c1 byte + for (c != '*' || c1 != '/') && r.err == nil { + if r.eof { + r.syntaxError() + } + c, c1 = c1, r.readByte() + } + } else { + r.syntaxError() + } + c = r.readByte() + continue + } + } + break + } + r.peek = c + return r.peek +} + +// nextByte is like peekByte but advances beyond the returned byte. +func (r *importReader) nextByte(skipSpace bool) byte { + c := r.peekByte(skipSpace) + r.peek = 0 + return c +} + +// readKeyword reads the given keyword from the input. +// If the keyword is not present, readKeyword records a syntax error. +func (r *importReader) readKeyword(kw string) { + r.peekByte(true) + for i := 0; i < len(kw); i++ { + if r.nextByte(false) != kw[i] { + r.syntaxError() + return + } + } + if isIdent(r.peekByte(false)) { + r.syntaxError() + } +} + +// readIdent reads an identifier from the input. +// If an identifier is not present, readIdent records a syntax error. +func (r *importReader) readIdent() { + c := r.peekByte(true) + if !isIdent(c) { + r.syntaxError() + return + } + for isIdent(r.peekByte(false)) { + r.peek = 0 + } +} + +// readString reads a quoted string literal from the input. +// If an identifier is not present, readString records a syntax error. +func (r *importReader) readString(save *[]string) { + switch r.nextByte(true) { + case '`': + start := len(r.buf) - 1 + for r.err == nil { + if r.nextByte(false) == '`' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof { + r.syntaxError() + } + } + case '"': + start := len(r.buf) - 1 + for r.err == nil { + c := r.nextByte(false) + if c == '"' { + if save != nil { + *save = append(*save, string(r.buf[start:])) + } + break + } + if r.eof || c == '\n' { + r.syntaxError() + } + if c == '\\' { + r.nextByte(false) + } + } + default: + r.syntaxError() + } +} + +// readImport reads an import clause - optional identifier followed by quoted string - +// from the input. +func (r *importReader) readImport(imports *[]string) { + c := r.peekByte(true) + if c == '.' { + r.peek = 0 + } else if isIdent(c) { + r.readIdent() + } + r.readString(imports) +} + +// readComments is like ioutil.ReadAll, except that it only reads the leading +// block of comments in the file. +func readComments(f io.Reader) ([]byte, error) { + r := &importReader{b: bufio.NewReader(f)} + r.peekByte(true) + if r.err == nil && !r.eof { + // Didn't reach EOF, so must have found a non-space byte. Remove it. + r.buf = r.buf[:len(r.buf)-1] + } + return r.buf, r.err +} + +// readImports is like ioutil.ReadAll, except that it expects a Go file as input +// and stops reading the input once the imports have completed. +func readImports(f io.Reader, reportSyntaxError bool, imports *[]string) ([]byte, error) { + r := &importReader{b: bufio.NewReader(f)} + + r.readKeyword("package") + r.readIdent() + for r.peekByte(true) == 'i' { + r.readKeyword("import") + if r.peekByte(true) == '(' { + r.nextByte(false) + for r.peekByte(true) != ')' && r.err == nil { + r.readImport(imports) + } + r.nextByte(false) + } else { + r.readImport(imports) + } + } + + // If we stopped successfully before EOF, we read a byte that told us we were done. + // Return all but that last byte, which would cause a syntax error if we let it through. 
+ if r.err == nil && !r.eof { + return r.buf[:len(r.buf)-1], nil + } + + // If we stopped for a syntax error, consume the whole file so that + // we are sure we don't change the errors that go/parser returns. + if r.err == errSyntax && !reportSyntaxError { + r.err = nil + for r.err == nil && !r.eof { + r.readByte() + } + } + + return r.buf, r.err +} diff --git a/vendor/github.com/magefile/mage/build/read_test.go b/vendor/github.com/magefile/mage/build/read_test.go new file mode 100644 index 0000000..9cef657 --- /dev/null +++ b/vendor/github.com/magefile/mage/build/read_test.go @@ -0,0 +1,226 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package build + +import ( + "io" + "strings" + "testing" +) + +const quote = "`" + +type readTest struct { + // Test input contains ℙ where readImports should stop. + in string + err string +} + +var readImportsTests = []readTest{ + { + `package p`, + "", + }, + { + `package p; import "x"`, + "", + }, + { + `package p; import . "x"`, + "", + }, + { + `package p; import "x";ℙvar x = 1`, + "", + }, + { + `package p + + // comment + + import "x" + import _ "x" + import a "x" + + /* comment */ + + import ( + "x" /* comment */ + _ "x" + a "x" // comment + ` + quote + `x` + quote + ` + _ /*comment*/ ` + quote + `x` + quote + ` + a ` + quote + `x` + quote + ` + ) + import ( + ) + import () + import()import()import() + import();import();import() + + ℙvar x = 1 + `, + "", + }, +} + +var readCommentsTests = []readTest{ + { + `ℙpackage p`, + "", + }, + { + `ℙpackage p; import "x"`, + "", + }, + { + `ℙpackage p; import . "x"`, + "", + }, + { + `// foo + + /* bar */ + + /* quux */ // baz + + /*/ zot */ + + // asdf + ℙHello, world`, + "", + }, +} + +func testRead(t *testing.T, tests []readTest, read func(io.Reader) ([]byte, error)) { + for i, tt := range tests { + var in, testOut string + j := strings.Index(tt.in, "ℙ") + if j < 0 { + in = tt.in + testOut = tt.in + } else { + in = tt.in[:j] + tt.in[j+len("ℙ"):] + testOut = tt.in[:j] + } + r := strings.NewReader(in) + buf, err := read(r) + if err != nil { + if tt.err == "" { + t.Errorf("#%d: err=%q, expected success (%q)", i, err, string(buf)) + continue + } + if !strings.Contains(err.Error(), tt.err) { + t.Errorf("#%d: err=%q, expected %q", i, err, tt.err) + continue + } + continue + } + if err == nil && tt.err != "" { + t.Errorf("#%d: success, expected %q", i, tt.err) + continue + } + + out := string(buf) + if out != testOut { + t.Errorf("#%d: wrong output:\nhave %q\nwant %q\n", i, out, testOut) + } + } +} + +func TestReadImports(t *testing.T) { + testRead(t, readImportsTests, func(r io.Reader) ([]byte, error) { return readImports(r, true, nil) }) +} + +func TestReadComments(t *testing.T) { + testRead(t, readCommentsTests, readComments) +} + +var readFailuresTests = []readTest{ + { + `package`, + "syntax error", + }, + { + "package p\n\x00\nimport `math`\n", + "unexpected NUL in input", + }, + { + `package p; import`, + "syntax error", + }, + { + `package p; import "`, + "syntax error", + }, + { + "package p; import ` \n\n", + "syntax error", + }, + { + `package p; import "x`, + "syntax error", + }, + { + `package p; import _`, + "syntax error", + }, + { + `package p; import _ "`, + "syntax error", + }, + { + `package p; import _ "x`, + "syntax error", + }, + { + `package p; import .`, + "syntax error", + }, + { + `package p; import . "`, + "syntax error", + }, + { + `package p; import . 
"x`, + "syntax error", + }, + { + `package p; import (`, + "syntax error", + }, + { + `package p; import ("`, + "syntax error", + }, + { + `package p; import ("x`, + "syntax error", + }, + { + `package p; import ("x"`, + "syntax error", + }, +} + +func TestReadFailures(t *testing.T) { + // Errors should be reported (true arg to readImports). + testRead(t, readFailuresTests, func(r io.Reader) ([]byte, error) { return readImports(r, true, nil) }) +} + +func TestReadFailuresIgnored(t *testing.T) { + // Syntax errors should not be reported (false arg to readImports). + // Instead, entire file should be the output and no error. + // Convert tests not to return syntax errors. + tests := make([]readTest, len(readFailuresTests)) + copy(tests, readFailuresTests) + for i := range tests { + tt := &tests[i] + if !strings.Contains(tt.err, "NUL") { + tt.err = "" + } + } + testRead(t, tests, func(r io.Reader) ([]byte, error) { return readImports(r, false, nil) }) +} diff --git a/vendor/github.com/magefile/mage/build/syslist.go b/vendor/github.com/magefile/mage/build/syslist.go new file mode 100644 index 0000000..73fdbe6 --- /dev/null +++ b/vendor/github.com/magefile/mage/build/syslist.go @@ -0,0 +1,8 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package build + +const goosList = "android darwin dragonfly freebsd linux nacl netbsd openbsd plan9 solaris windows zos " +const goarchList = "386 amd64 amd64p32 arm armbe arm64 arm64be ppc64 ppc64le mips mipsle mips64 mips64le mips64p32 mips64p32le ppc s390 s390x sparc sparc64 " diff --git a/vendor/github.com/magefile/mage/build/syslist_test.go b/vendor/github.com/magefile/mage/build/syslist_test.go new file mode 100644 index 0000000..7973ff4 --- /dev/null +++ b/vendor/github.com/magefile/mage/build/syslist_test.go @@ -0,0 +1,62 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package build + +import ( + "runtime" + "testing" +) + +var ( + thisOS = runtime.GOOS + thisArch = runtime.GOARCH + otherOS = anotherOS() + otherArch = anotherArch() +) + +func anotherOS() string { + if thisOS != "darwin" { + return "darwin" + } + return "linux" +} + +func anotherArch() string { + if thisArch != "amd64" { + return "amd64" + } + return "386" +} + +type GoodFileTest struct { + name string + result bool +} + +var tests = []GoodFileTest{ + {"file.go", true}, + {"file.c", true}, + {"file_foo.go", true}, + {"file_" + thisArch + ".go", true}, + {"file_" + otherArch + ".go", false}, + {"file_" + thisOS + ".go", true}, + {"file_" + otherOS + ".go", false}, + {"file_" + thisOS + "_" + thisArch + ".go", true}, + {"file_" + otherOS + "_" + thisArch + ".go", false}, + {"file_" + thisOS + "_" + otherArch + ".go", false}, + {"file_" + otherOS + "_" + otherArch + ".go", false}, + {"file_foo_" + thisArch + ".go", true}, + {"file_foo_" + otherArch + ".go", false}, + {"file_" + thisOS + ".c", true}, + {"file_" + otherOS + ".c", false}, +} + +func TestGoodOSArch(t *testing.T) { + for _, test := range tests { + if Default.goodOSArchFile(test.name, make(map[string]bool)) != test.result { + t.Fatalf("goodOSArchFile(%q) != %v", test.name, test.result) + } + } +} diff --git a/vendor/github.com/magefile/mage/build/zcgo.go b/vendor/github.com/magefile/mage/build/zcgo.go new file mode 100644 index 0000000..86e2a2d --- /dev/null +++ b/vendor/github.com/magefile/mage/build/zcgo.go @@ -0,0 +1,37 @@ +// auto generated by go tool dist + +package build + +const defaultCGO_ENABLED = "" + +var cgoEnabled = map[string]bool{ + "android/386": true, + "android/amd64": true, + "android/arm": true, + "android/arm64": true, + "darwin/386": true, + "darwin/amd64": true, + "darwin/arm": true, + "darwin/arm64": true, + "dragonfly/amd64": true, + "freebsd/386": true, + "freebsd/amd64": true, + "linux/386": true, + "linux/amd64": true, + "linux/arm": true, + "linux/arm64": true, + "linux/mips": true, + "linux/mips64": true, + "linux/mips64le": true, + "linux/mipsle": true, + "linux/ppc64le": true, + "linux/s390x": true, + "netbsd/386": true, + "netbsd/amd64": true, + "netbsd/arm": true, + "openbsd/386": true, + "openbsd/amd64": true, + "solaris/amd64": true, + "windows/386": true, + "windows/amd64": true, +} diff --git a/vendor/github.com/magefile/mage/mage/magefile_tmpl.go b/vendor/github.com/magefile/mage/mage/magefile_tmpl.go new file mode 100644 index 0000000..01b8786 --- /dev/null +++ b/vendor/github.com/magefile/mage/mage/magefile_tmpl.go @@ -0,0 +1,46 @@ +package mage + +var mageTpl = `// +build mage + +package main + +import ( + "fmt" + "os" + "os/exec" + + "github.com/magefile/mage/mg" // mg contains helpful utility functions, like Deps +) + +// Default target to run when none is specified +// If not set, running mage will list available targets +// var Default = Build + +// A build step that requires additional params, or platform specific steps for example +func Build() error { + mg.Deps(InstallDeps) + fmt.Println("Building...") + cmd := exec.Command("go", "build", "-o", "MyApp", ".") + return cmd.Run() +} + +// A custom install step if you need your bin someplace other than go/bin +func Install() error { + mg.Deps(Build) + fmt.Println("Installing...") + return os.Rename("./MyApp", "/usr/bin/MyApp") +} + +// Manage your deps, or running package managers. 
+func InstallDeps() error { + fmt.Println("Installing Deps...") + cmd := exec.Command("go", "get", "github.com/stretchr/piglatin") + return cmd.Run() +} + +// Clean up after yourself +func Clean() { + fmt.Println("Cleaning...") + os.RemoveAll("MyApp") +} +` diff --git a/vendor/github.com/magefile/mage/mage/main.go b/vendor/github.com/magefile/mage/mage/main.go new file mode 100644 index 0000000..b082ec0 --- /dev/null +++ b/vendor/github.com/magefile/mage/mage/main.go @@ -0,0 +1,399 @@ +package mage + +import ( + "crypto/sha1" + "errors" + "flag" + "fmt" + "io" + "log" + "os" + "os/exec" + "path/filepath" + "runtime" + "sort" + "strconv" + "strings" + "text/template" + "time" + "unicode" + + "github.com/magefile/mage/build" + "github.com/magefile/mage/mg" + "github.com/magefile/mage/parse" + "github.com/magefile/mage/sh" +) + +// mageVer is used when hashing the output binary to ensure that we get a new +// binary if we use a differernt version of mage. +const mageVer = "v0.3" + +var output = template.Must(template.New("").Funcs(map[string]interface{}{ + "lower": strings.ToLower, + "lowerfirst": func(s string) string { + r := []rune(s) + return string(unicode.ToLower(r[0])) + string(r[1:]) + }, +}).Parse(tpl)) +var initOutput = template.Must(template.New("").Parse(mageTpl)) + +const mainfile = "mage_output_file.go" +const initFile = "magefile.go" + +// set by ldflags when you "mage build" +var ( + commitHash string + timestamp string + gitTag = "v2" +) + +// Main is the entrypoint for running mage. It exists external to mage's main +// function to allow it to be used from other programs, specifically so you can +// go run a simple file that run's mage's Main. +func Main() int { + return ParseAndRun(".", os.Stdout, os.Stderr, os.Stdin, os.Args[1:]) +} + +// Invocation contains the args for invoking a run of Mage. +type Invocation struct { + Dir string // directory to read magefiles from + Force bool // forces recreation of the compiled binary + Verbose bool // tells the magefile to print out log statements + List bool // tells the magefile to print out a list of targets + Help bool // tells the magefile to print out help for a specific target + Keep bool // tells mage to keep the generated main file after compiling + Timeout time.Duration // tells mage to set a timeout to running the targets + Stdout io.Writer // writer to write stdout messages to + Stderr io.Writer // writer to write stderr messages to + Stdin io.Reader // reader to read stdin from + Args []string // args to pass to the compiled binary +} + +// ParseAndRun parses the command line, and then compiles and runs the mage +// files in the given directory with the given args (do not include the command +// name in the args). 
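+// A minimal wrapper is roughly what Main above already does (sketch only):
+//
+//	func main() {
+//		os.Exit(mage.ParseAndRun(".", os.Stdout, os.Stderr, os.Stdin, os.Args[1:]))
+//	}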
+func ParseAndRun(dir string, stdout, stderr io.Writer, stdin io.Reader, args []string) int { + log := log.New(stderr, "", 0) + inv, mageInit, showVersion, err := Parse(stdout, args) + inv.Dir = dir + inv.Stderr = stderr + inv.Stdin = stdin + if err == flag.ErrHelp { + return 0 + } + if err != nil { + log.Println("Error:", err) + return 2 + } + + if showVersion { + if timestamp == "" { + timestamp = "" + } + if commitHash == "" { + commitHash = "" + } + log.Println("Mage Build Tool", gitTag) + log.Println("Build Date:", timestamp) + log.Println("Commit:", commitHash) + return 0 + } + if mageInit { + if err := generateInit(dir); err != nil { + log.Println("Error:", err) + return 1 + } + log.Println(initFile, "created") + return 0 + } + + return Invoke(inv) +} + +// Parse parses the given args and returns structured data. If parse returns +// flag.ErrHelp, the calling process should exit with code 0. +func Parse(stdout io.Writer, args []string) (inv Invocation, mageInit, showVersion bool, err error) { + inv.Stdout = stdout + fs := flag.FlagSet{} + fs.SetOutput(stdout) + fs.BoolVar(&inv.Force, "f", false, "force recreation of compiled magefile") + fs.BoolVar(&inv.Verbose, "v", false, "show verbose output when running mage targets") + fs.BoolVar(&inv.List, "l", false, "list mage targets in this directory") + fs.BoolVar(&inv.Help, "h", false, "show this help") + fs.BoolVar(&mageInit, "init", false, "create a starting template if no mage files exist") + fs.DurationVar(&inv.Timeout, "t", 0, "timeout in duration parsable format (e.g. 5m30s)") + fs.BoolVar(&inv.Keep, "keep", false, "keep intermediate mage files around after running") + fs.BoolVar(&showVersion, "version", false, "show version info for the mage binary") + fs.Usage = func() { + fmt.Fprintln(stdout, "mage [options] [target]") + fmt.Fprintln(stdout, "Options:") + fs.PrintDefaults() + } + err = fs.Parse(args) + if err == flag.ErrHelp { + // parse will have already called fs.Usage() + return inv, mageInit, showVersion, err + } + if err == nil && inv.Help && len(fs.Args()) == 0 { + fs.Usage() + // tell upstream, to just exit + return inv, mageInit, showVersion, flag.ErrHelp + } + + // If verbose is still false, we're going to peek at the environment variable to see if + // MAGE_VERBOSE has been set. If so, we're going to use it for the value of MAGE_VERBOSE. + if inv.Verbose == false { + envVerbose, err := strconv.ParseBool(os.Getenv("MAGE_VERBOSE")) + if err == nil { + inv.Verbose = envVerbose + } + } + numFlags := 0 + if inv.Help { + numFlags++ + } + if mageInit { + numFlags++ + } + if showVersion { + numFlags++ + } + + if numFlags > 1 { + return inv, mageInit, showVersion, errors.New("-h, -init, and -version cannot be used simultaneously") + } + + inv.Args = fs.Args() + if inv.Help && len(inv.Args) > 1 { + return inv, mageInit, showVersion, errors.New("-h can only show help for a single target") + } + + return inv, mageInit, showVersion, err +} + +// Invoke runs Mage with the given arguments. +func Invoke(inv Invocation) int { + log := log.New(inv.Stderr, "", 0) + + files, err := Magefiles(inv.Dir) + if err != nil { + log.Println("Error:", err) + return 1 + } + + if len(files) == 0 { + log.Println("No .go files marked with the mage build tag in this directory.") + return 1 + } + + exePath, err := ExeName(files) + + if err != nil { + log.Println("Error:", err) + return 1 + } + + if !inv.Force { + if _, err := os.Stat(exePath); err == nil { + return RunCompiled(inv, exePath) + } + } + + // parse wants dir + filenames... 
arg + fnames := make([]string, 0, len(files)) + for i := range files { + fnames = append(fnames, filepath.Base(files[i])) + } + + info, err := parse.Package(inv.Dir, fnames) + if err != nil { + log.Println("Error:", err) + return 1 + } + + hasDupes, names := CheckDupes(info) + if hasDupes { + log.Println("Build targets must be case insensitive, thus the follow targets conflict:") + for _, v := range names { + if len(v) > 1 { + log.Println(" " + strings.Join(v, ", ")) + } + } + return 1 + } + + main := filepath.Join(inv.Dir, mainfile) + if err := GenerateMainfile(main, info); err != nil { + log.Println("Error:", err) + return 1 + } + if !inv.Keep { + defer os.Remove(main) + } + files = append(files, main) + if err := Compile(exePath, inv.Stdout, inv.Stderr, files); err != nil { + log.Println("Error:", err) + return 1 + } + if !inv.Keep { + // remove this file before we run the compiled version, in case the + // compiled file screws things up. Yes this doubles up with the above + // defer, that's ok. + os.Remove(main) + } + + return RunCompiled(inv, exePath) +} + +// CheckDupes checks a package for duplicate target names. +func CheckDupes(info *parse.PkgInfo) (hasDupes bool, names map[string][]string) { + names = map[string][]string{} + lowers := map[string]bool{} + for _, f := range info.Funcs { + low := strings.ToLower(f.Name) + if lowers[low] { + hasDupes = true + } + lowers[low] = true + names[low] = append(names[low], f.Name) + } + return hasDupes, names +} + +type data struct { + Funcs []parse.Function + DefaultError bool + Default string + DefaultFunc parse.Function +} + +// Magefiles returns the list of magefiles in dir. +func Magefiles(dir string) ([]string, error) { + ctx := build.Default + ctx.RequiredTags = []string{"mage"} + ctx.BuildTags = []string{"mage"} + p, err := ctx.ImportDir(dir, 0) + if err != nil { + if _, ok := err.(*build.NoGoError); ok { + return []string{}, nil + } + return nil, err + } + for i := range p.GoFiles { + p.GoFiles[i] = filepath.Join(dir, p.GoFiles[i]) + } + return p.GoFiles, nil +} + +// Compile uses the go tool to compile the files into an executable at path. +func Compile(path string, stdout, stderr io.Writer, gofiles []string) error { + c := exec.Command("go", append([]string{"build", "-o", path}, gofiles...)...) + c.Env = os.Environ() + c.Stderr = stderr + c.Stdout = stdout + err := c.Run() + if err != nil { + return errors.New("error compiling magefiles") + } + if _, err := os.Stat(path); err != nil { + return errors.New("failed to find compiled magefile") + } + return nil +} + +// GenerateMainfile creates the mainfile at path with the info from +func GenerateMainfile(path string, info *parse.PkgInfo) error { + f, err := os.Create(path) + if err != nil { + return fmt.Errorf("can't create mainfile: %v", err) + } + defer f.Close() + + data := data{ + Funcs: info.Funcs, + Default: info.DefaultName, + DefaultFunc: info.DefaultFunc, + } + + data.DefaultError = info.DefaultIsError + + if err := output.Execute(f, data); err != nil { + return fmt.Errorf("can't execute mainfile template: %v", err) + } + return nil +} + +// ExeName reports the executable filename that this version of Mage would +// create for the given magefiles. +func ExeName(files []string) (string, error) { + var hashes []string + for _, s := range files { + h, err := hashFile(s) + if err != nil { + return "", err + } + hashes = append(hashes, h) + } + // hash the mainfile template to ensure if it gets updated, we make a new + // binary. 
+ hashes = append(hashes, fmt.Sprintf("%x", sha1.Sum([]byte(tpl)))) + sort.Strings(hashes) + hash := sha1.Sum([]byte(strings.Join(hashes, "") + mageVer)) + filename := fmt.Sprintf("%x", hash) + + out := filepath.Join(mg.CacheDir(), filename) + if runtime.GOOS == "windows" { + out += ".exe" + } + return out, nil +} + +func hashFile(fn string) (string, error) { + f, err := os.Open(fn) + if err != nil { + return "", fmt.Errorf("can't open input file: %v", err) + } + defer f.Close() + + h := sha1.New() + if _, err := io.Copy(h, f); err != nil { + return "", fmt.Errorf("can't write data to hash: %v", err) + } + return fmt.Sprintf("%x", h.Sum(nil)), nil +} + +func generateInit(dir string) error { + f, err := os.Create(filepath.Join(dir, initFile)) + if err != nil { + return fmt.Errorf("could not create mage template: %v", err) + } + defer f.Close() + + if err := initOutput.Execute(f, nil); err != nil { + return fmt.Errorf("can't execute magefile template: %v", err) + } + + return nil +} + +// RunCompiled runs an already-compiled mage command with the given args, +func RunCompiled(inv Invocation, exePath string) int { + c := exec.Command(exePath, inv.Args...) + c.Stderr = inv.Stderr + c.Stdout = inv.Stdout + c.Stdin = inv.Stdin + c.Env = os.Environ() + if inv.Verbose { + c.Env = append(c.Env, "MAGEFILE_VERBOSE=1") + } + if inv.List { + c.Env = append(c.Env, "MAGEFILE_LIST=1") + } + if inv.Help { + c.Env = append(c.Env, "MAGEFILE_HELP=1") + } + if inv.Timeout > 0 { + c.Env = append(c.Env, fmt.Sprintf("MAGEFILE_TIMEOUT=%s", inv.Timeout.String())) + } + return sh.ExitStatus(c.Run()) +} diff --git a/vendor/github.com/magefile/mage/mage/main_test.go b/vendor/github.com/magefile/mage/mage/main_test.go new file mode 100644 index 0000000..a6e4ac2 --- /dev/null +++ b/vendor/github.com/magefile/mage/mage/main_test.go @@ -0,0 +1,479 @@ +package mage + +import ( + "bytes" + "flag" + "fmt" + "go/parser" + "go/token" + "io/ioutil" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + "time" + + "github.com/magefile/mage/build" + "github.com/magefile/mage/mg" +) + +func TestMain(m *testing.M) { + os.Exit(testmain(m)) +} + +func testmain(m *testing.M) int { + // ensure we write our temporary binaries to a directory that we'll delete + // after running tests. 
+ dir := "./testing" + abs, err := filepath.Abs(dir) + if err != nil { + log.Fatal(err) + } + if err := os.Setenv(mg.CacheEnv, abs); err != nil { + log.Fatal(err) + } + if err := os.Mkdir(dir, 0700); err != nil { + if os.IsExist(err) { + os.RemoveAll(dir) + } else { + log.Fatal(err) + } + } + defer os.RemoveAll(dir) + return m.Run() +} + +func TestGoRun(t *testing.T) { + c := exec.Command("go", "run", "main.go") + c.Dir = "./testdata" + c.Env = os.Environ() + b, err := c.CombinedOutput() + if err != nil { + t.Error("error:", err) + } + actual := string(b) + expected := "stuff\n" + if actual != expected { + t.Fatalf("expected %q, but got %q", expected, actual) + } +} + +func TestVerbose(t *testing.T) { + stderr := &bytes.Buffer{} + stdout := &bytes.Buffer{} + inv := Invocation{ + Dir: "./testdata", + Stdout: stdout, + Stderr: stderr, + Args: []string{"testverbose"}, + } + + code := Invoke(inv) + if code != 0 { + t.Errorf("expected to exit with code 0, but got %v", code) + } + actual := stdout.String() + expected := "" + if actual != expected { + t.Fatalf("expected %q, but got %q", expected, actual) + } + stderr.Reset() + stdout.Reset() + inv.Verbose = true + code = Invoke(inv) + if code != 0 { + t.Errorf("expected to exit with code 0, but got %v", code) + } + + actual = stderr.String() + expected = "Running target: TestVerbose\nhi!\n" + if actual != expected { + t.Fatalf("expected %q, but got %q", expected, actual) + } +} + +func TestVerboseEnv(t *testing.T) { + os.Setenv("MAGE_VERBOSE", "true") + + stdout := &bytes.Buffer{} + inv, _, _, err := Parse(stdout, []string{}) + if err != nil { + t.Fatal("unexpected error", err) + } + + expected := true + + if inv.Verbose != true { + t.Fatalf("expected %t, but got %t ", expected, inv.Verbose) + } + + os.Unsetenv("MAGE_VERBOSE") +} + +func TestList(t *testing.T) { + stdout := &bytes.Buffer{} + inv := Invocation{ + Dir: "./testdata/list", + Stdout: stdout, + Stderr: ioutil.Discard, + List: true, + } + + code := Invoke(inv) + if code != 0 { + t.Errorf("expected to exit with code 0, but got %v", code) + } + actual := stdout.String() + expected := ` +Targets: + somePig* This is the synopsis for SomePig. + testVerbose + +* default target +`[1:] + + if actual != expected { + t.Logf("expected: %q", expected) + t.Logf(" actual: %q", actual) + t.Fatalf("expected:\n%v\n\ngot:\n%v", expected, actual) + } +} + +func TestNoArgNoDefaultList(t *testing.T) { + stdout := &bytes.Buffer{} + stderr := &bytes.Buffer{} + inv := Invocation{ + Dir: "testdata/no_default", + Stdout: stdout, + Stderr: stderr, + } + code := Invoke(inv) + if code != 0 { + t.Errorf("expected to exit with code 0, but got %v", code) + } + if err := stderr.String(); err != "" { + t.Errorf("unexpected stderr output:\n%s", err) + } + actual := stdout.String() + expected := ` +Targets: + bazBuz Prints out 'BazBuz'. + fooBar Prints out 'FooBar'. 
+`[1:] + if actual != expected { + t.Fatalf("expected:\n%q\n\ngot:\n%q", expected, actual) + } +} + +func TestTargetError(t *testing.T) { + stderr := &bytes.Buffer{} + inv := Invocation{ + Dir: "./testdata", + Stdout: ioutil.Discard, + Stderr: stderr, + Args: []string{"returnsnonnilerror"}, + } + code := Invoke(inv) + if code != 1 { + t.Fatalf("expected 1, but got %v", code) + } + actual := stderr.String() + expected := "Error: bang!\n" + if actual != expected { + t.Fatalf("expected %q, but got %q", expected, actual) + } +} + +func TestStdinCopy(t *testing.T) { + stdout := &bytes.Buffer{} + stdin := strings.NewReader("hi!") + inv := Invocation{ + Dir: "./testdata", + Stderr: ioutil.Discard, + Stdout: stdout, + Stdin: stdin, + Args: []string{"CopyStdin"}, + } + code := Invoke(inv) + if code != 0 { + t.Fatalf("expected 0, but got %v", code) + } + actual := stdout.String() + expected := "hi!" + if actual != expected { + t.Fatalf("expected %q, but got %q", expected, actual) + } +} + +func TestTargetPanics(t *testing.T) { + stderr := &bytes.Buffer{} + inv := Invocation{ + Dir: "./testdata", + Stdout: ioutil.Discard, + Stderr: stderr, + Args: []string{"panics"}, + } + code := Invoke(inv) + if code != 1 { + t.Fatalf("expected 1, but got %v", code) + } + actual := stderr.String() + expected := "Error: boom!\n" + if actual != expected { + t.Fatalf("expected %q, but got %q", expected, actual) + } +} + +func TestPanicsErr(t *testing.T) { + stderr := &bytes.Buffer{} + inv := Invocation{ + Dir: "./testdata", + Stdout: ioutil.Discard, + Stderr: stderr, + Args: []string{"panicserr"}, + } + code := Invoke(inv) + if code != 1 { + t.Fatalf("expected 1, but got %v", code) + } + actual := stderr.String() + expected := "Error: kaboom!\n" + if actual != expected { + t.Fatalf("expected %q, but got %q", expected, actual) + } +} + +// ensure we include the hash of the mainfile template in determining the +// executable name to run, so we automatically create a new exe if the template +// changes. 
+func TestHashTemplate(t *testing.T) { + templ := tpl + defer func() { tpl = templ }() + name, err := ExeName([]string{"./testdata/func.go", "./testdata/command.go"}) + if err != nil { + t.Fatal(err) + } + tpl = "some other template" + changed, err := ExeName([]string{"./testdata/func.go", "./testdata/command.go"}) + if changed == name { + t.Fatal("expected executable name to change if template changed") + } +} + +// Test if the -keep flag does keep the mainfile around after running +func TestKeepFlag(t *testing.T) { + buildFile := fmt.Sprintf("./testdata/keep_flag/%s", mainfile) + os.Remove(buildFile) + defer os.Remove(buildFile) + w := tLogWriter{t} + + inv := Invocation{ + Dir: "./testdata/keep_flag", + Stdout: w, + Stderr: w, + List: true, + Keep: true, + Force: true, // need force so we always regenerate + } + code := Invoke(inv) + if code != 0 { + t.Fatalf("expected code 0, but got %v", code) + } + + if _, err := os.Stat(buildFile); err != nil { + t.Fatalf("expected file %q to exist but got err, %v", buildFile, err) + } +} + +type tLogWriter struct { + *testing.T +} + +func (t tLogWriter) Write(b []byte) (n int, err error) { + t.Log(string(b)) + return len(b), nil +} + +// Test if generated mainfile references anything other than the stdlib +func TestOnlyStdLib(t *testing.T) { + buildFile := fmt.Sprintf("./testdata/onlyStdLib/%s", mainfile) + os.Remove(buildFile) + defer os.Remove(buildFile) + + w := tLogWriter{t} + + inv := Invocation{ + Dir: "./testdata/onlyStdLib", + Stdout: w, + Stderr: w, + List: true, + Keep: true, + Force: true, // need force so we always regenerate + Verbose: true, + } + code := Invoke(inv) + if code != 0 { + t.Fatalf("expected code 0, but got %v", code) + } + + if _, err := os.Stat(buildFile); err != nil { + t.Fatalf("expected file %q to exist but got err, %v", buildFile, err) + } + + fset := &token.FileSet{} + // Parse src but stop after processing the imports. + f, err := parser.ParseFile(fset, buildFile, nil, parser.ImportsOnly) + if err != nil { + fmt.Println(err) + return + } + + // Print the imports from the file's AST. + for _, s := range f.Imports { + // the path value comes in as a quoted string, i.e.
literally \"context\" + path := strings.Trim(s.Path.Value, "\"") + pkg, err := build.Default.Import(path, "./testdata/keep_flag", build.FindOnly) + if err != nil { + t.Fatal(err) + } + if !filepath.HasPrefix(pkg.Dir, build.Default.GOROOT) { + t.Errorf("import of non-stdlib package: %s", s.Path.Value) + } + } +} + +func TestMultipleTargets(t *testing.T) { + var stderr, stdout bytes.Buffer + inv := Invocation{ + Dir: "./testdata", + Stdout: &stdout, + Stderr: &stderr, + Args: []string{"TestVerbose", "ReturnsNilError"}, + Verbose: true, + } + code := Invoke(inv) + if code != 0 { + t.Errorf("expected 0, but got %v", code) + } + actual := stderr.String() + expected := "Running target: TestVerbose\nhi!\nRunning target: ReturnsNilError\n" + if actual != expected { + t.Errorf("expected %q, but got %q", expected, actual) + } + actual = stdout.String() + expected = "stuff\n" + if actual != expected { + t.Errorf("expected %q, but got %q", expected, actual) + } +} + +func TestFirstTargetFails(t *testing.T) { + var stderr, stdout bytes.Buffer + inv := Invocation{ + Dir: "./testdata", + Stdout: &stdout, + Stderr: &stderr, + Args: []string{"ReturnsNonNilError", "ReturnsNilError"}, + Verbose: true, + } + code := Invoke(inv) + if code != 1 { + t.Errorf("expected 1, but got %v", code) + } + actual := stderr.String() + expected := "Running target: ReturnsNonNilError\nError: bang!\n" + if actual != expected { + t.Errorf("expected %q, but got %q", expected, actual) + } + actual = stdout.String() + expected = "" + if actual != expected { + t.Errorf("expected %q, but got %q", expected, actual) + } +} + +func TestBadSecondTargets(t *testing.T) { + var stderr, stdout bytes.Buffer + inv := Invocation{ + Dir: "./testdata", + Stdout: &stdout, + Stderr: &stderr, + Args: []string{"TestVerbose", "NotGonnaWork"}, + } + code := Invoke(inv) + if code != 2 { + t.Errorf("expected 2, but got %v", code) + } + actual := stderr.String() + expected := "Unknown target specified: NotGonnaWork\n" + if actual != expected { + t.Errorf("expected %q, but got %q", expected, actual) + } + actual = stdout.String() + expected = "" + if actual != expected { + t.Errorf("expected %q, but got %q", expected, actual) + } +} + +func TestParse(t *testing.T) { + buf := &bytes.Buffer{} + inv, init, showVer, err := Parse(buf, []string{"-v", "build"}) + if err != nil { + t.Fatal("unexpected error", err) + } + if init { + t.Fatal("init should be false but was true") + } + if showVer { + t.Fatal("showVersion should be false but was true") + } + if len(inv.Args) != 1 || inv.Args[0] != "build" { + t.Fatalf("expected args to be %q but got %q", []string{"build"}, inv.Args) + } + if s := buf.String(); s != "" { + t.Fatalf("expected no stdout output but got %q", s) + } + +} + +// Test the timeout option +func TestTimeout(t *testing.T) { + stderr := &bytes.Buffer{} + inv := Invocation{ + Dir: "./testdata/context", + Stdout: ioutil.Discard, + Stderr: stderr, + Args: []string{"timeout"}, + Timeout: time.Duration(100 * time.Millisecond), + } + code := Invoke(inv) + if code != 1 { + t.Fatalf("expected 1, but got %v", code) + } + actual := stderr.String() + expected := "Error: context deadline exceeded\n" + + if actual != expected { + t.Fatalf("expected %q, but got %q", expected, actual) + } +} +func TestParseHelp(t *testing.T) { + buf := &bytes.Buffer{} + _, _, _, err := Parse(buf, []string{"-h"}) + if err != flag.ErrHelp { + t.Fatal("unexpected error", err) + } + buf2 := &bytes.Buffer{} + _, _, _, err = Parse(buf2, []string{"--help"}) + if err != flag.ErrHelp { +
t.Fatal("unexpected error", err) + } + s := buf.String() + s2 := buf2.String() + if s != s2 { + t.Fatalf("expected -h and --help to produce same output, but got different.\n\n-h:\n%s\n\n--help:\n%s", s, s2) + } +} diff --git a/vendor/github.com/magefile/mage/mage/template.go b/vendor/github.com/magefile/mage/mage/template.go new file mode 100644 index 0000000..cf88873 --- /dev/null +++ b/vendor/github.com/magefile/mage/mage/template.go @@ -0,0 +1,180 @@ +package mage + +// var only for tests +var tpl = `// +build ignore + +package main + +import ( + "context" + "fmt" + "io/ioutil" + "log" + "os" + "strings" + "text/tabwriter" + "time" +) + +func main() { + log.SetFlags(0) + if os.Getenv("MAGEFILE_VERBOSE") == "" { + log.SetOutput(ioutil.Discard) + } + logger := log.New(os.Stderr, "", 0) + if os.Getenv("MAGEFILE_LIST") != "" { + if err := list(); err != nil { + log.Println(err) + os.Exit(1) + } + return + } + + targets := map[string]bool { + {{range .Funcs}}"{{lower .Name}}": true, + {{end}} + } + + var unknown []string + for _, arg := range os.Args[1:] { + if !targets[strings.ToLower(arg)] { + unknown = append(unknown, arg) + } + } + if len(unknown) == 1 { + logger.Println("Unknown target specified:", unknown[0]) + os.Exit(2) + } + if len(unknown) > 1 { + logger.Println("Unknown targets specified:", strings.Join(unknown, ", ")) + os.Exit(2) + } + + if os.Getenv("MAGEFILE_HELP") != "" { + if len(os.Args) < 2 { + logger.Println("no target specified") + os.Exit(1) + } + switch strings.ToLower(os.Args[1]) { + {{range .Funcs}}case "{{lower .Name}}": + fmt.Print("mage {{lower .Name}}:\n\n") + fmt.Println({{printf "%q" .Comment}}) + return + {{end}} + default: + logger.Printf("Unknown target: %q\n", os.Args[1]) + os.Exit(1) + } + } + + + if len(os.Args) < 2 { + {{- if .Default}} + {{.DefaultFunc.TemplateString}} + handleError(logger, err) + return + {{- else}} + if err := list(); err != nil { + logger.Println("Error:", err) + os.Exit(1) + } + return + {{- end}} + } + for _, target := range os.Args[1:] { + switch strings.ToLower(target) { + {{range .Funcs }} + case "{{lower .Name}}": + if os.Getenv("MAGEFILE_VERBOSE") != "" { + logger.Println("Running target:", "{{.Name}}") + } + {{.TemplateString}} + handleError(logger, err) + {{- end}} + default: + // should be impossible since we check this above. 
+ logger.Printf("Unknown target: %q\n", os.Args[1]) + os.Exit(1) + } + } +} + +func list() error { + {{- $default := .Default}} + w := tabwriter.NewWriter(os.Stdout, 0, 4, 4, ' ', 0) + fmt.Println("Targets:") + {{- range .Funcs}} + fmt.Fprintln(w, " {{lowerfirst .Name}}{{if eq .Name $default}}*{{end}}\t" + {{printf "%q" .Synopsis}}) + {{- end}} + err := w.Flush() + {{- if .Default}} + if err == nil { + fmt.Println("\n* default target") + } + {{- end}} + return err +} + +func handleError(logger *log.Logger, err interface{}) { + if err != nil { + logger.Printf("Error: %v\n", err) + type code interface { + ExitStatus() int + } + if c, ok := err.(code); ok { + os.Exit(c.ExitStatus()) + } + os.Exit(1) + } +} + +func runTarget(fn func(context.Context) error) interface{} { + var err interface{} + ctx, cancel := getContext() + d := make(chan interface{}) + go func() { + defer func() { + err := recover() + d <- err + }() + err := fn(ctx) + d <- err + }() + select { + case <-ctx.Done(): + cancel() + e := ctx.Err() + fmt.Printf("ctx err: %v\n", e) + return e + case err = <-d: + cancel() + return err + } +} + +var ctx context.Context +var ctxCancel func() + +func getContext() (context.Context, func()) { + if ctx != nil { + return ctx, ctxCancel + } + + if os.Getenv("MAGEFILE_TIMEOUT") != "" { + timeout, err := time.ParseDuration(os.Getenv("MAGEFILE_TIMEOUT")) + if err != nil { + fmt.Printf("timeout error: %v\n", err) + os.Exit(1) + } + + ctx, ctxCancel = context.WithTimeout(context.Background(), timeout) + } else { + ctx = context.Background() + ctxCancel = func() {} + } + return ctx, ctxCancel +} + + + +` diff --git a/vendor/github.com/magefile/mage/parse/import_go1.9.go b/vendor/github.com/magefile/mage/parse/import_go1.9.go new file mode 100644 index 0000000..9b5c712 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/import_go1.9.go @@ -0,0 +1,13 @@ +// +build go1.9 + +package parse + +import ( + "go/importer" + "go/token" + "go/types" +) + +func getImporter(*token.FileSet) types.Importer { + return importer.For("source", nil) +} diff --git a/vendor/github.com/magefile/mage/parse/import_not_go1.9.go b/vendor/github.com/magefile/mage/parse/import_not_go1.9.go new file mode 100644 index 0000000..ed4e951 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/import_not_go1.9.go @@ -0,0 +1,15 @@ +// +build !go1.9 + +package parse + +import ( + "go/build" + "go/token" + "go/types" + + "github.com/magefile/mage/parse/srcimporter" +) + +func getImporter(fset *token.FileSet) types.Importer { + return srcimporter.New(&build.Default, fset, make(map[string]*types.Package)) +} diff --git a/vendor/github.com/magefile/mage/parse/parse.go b/vendor/github.com/magefile/mage/parse/parse.go new file mode 100644 index 0000000..65f5a34 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/parse.go @@ -0,0 +1,271 @@ +package parse + +import ( + "fmt" + "go/ast" + "go/build" + "go/doc" + "go/parser" + "go/token" + "go/types" + "log" + "os" + "os/exec" + "strings" + + mgTypes "github.com/magefile/mage/types" +) + +type PkgInfo struct { + Funcs []Function + DefaultIsError bool + DefaultIsContext bool + DefaultName string + DefaultFunc Function +} + +// Function represented a job function from a mage file +type Function struct { + Name string + IsError bool + IsContext bool + Synopsis string + Comment string +} + +// TemplateString returns code for the template switch to run the target. +// It wraps each target call to match the func(context.Context) error that +// runTarget requires. 
+func (f Function) TemplateString() string { + if f.IsContext && f.IsError { + out := `wrapFn := func(ctx context.Context) error { + return %s(ctx) + } + err := runTarget(wrapFn)` + return fmt.Sprintf(out, f.Name) + } + if f.IsContext && !f.IsError { + out := `wrapFn := func(ctx context.Context) error { + %s(ctx) + return nil + } + err := runTarget(wrapFn)` + return fmt.Sprintf(out, f.Name) + } + if !f.IsContext && f.IsError { + out := `wrapFn := func(ctx context.Context) error { + return %s() + } + err := runTarget(wrapFn)` + return fmt.Sprintf(out, f.Name) + } + if !f.IsContext && !f.IsError { + out := `wrapFn := func(ctx context.Context) error { + %s() + return nil + } + err := runTarget(wrapFn)` + return fmt.Sprintf(out, f.Name) + } + return `fmt.Printf("Error formatting job code\n") + os.Exit(1)` +} + +// Package parses a package +func Package(path string, files []string) (*PkgInfo, error) { + fset := token.NewFileSet() + + pkg, err := getPackage(path, files, fset) + if err != nil { + return nil, err + } + + info, err := makeInfo(path, fset, pkg.Files) + if err != nil { + return nil, err + } + + pi := &PkgInfo{} + + p := doc.New(pkg, "./", 0) + for _, f := range p.Funcs { + if f.Recv != "" { + // skip methods + continue + } + if !ast.IsExported(f.Name) { + // skip non-exported functions + continue + } + if typ := voidOrError(f.Decl.Type, info); typ != mgTypes.InvalidType { + pi.Funcs = append(pi.Funcs, Function{ + Name: f.Name, + Comment: f.Doc, + Synopsis: doc.Synopsis(f.Doc), + IsError: typ == mgTypes.ErrorType || typ == mgTypes.ContextErrorType, + IsContext: typ == mgTypes.ContextVoidType || typ == mgTypes.ContextErrorType, + }) + } + } + + setDefault(p, pi, info) + + return pi, nil +} + +func setDefault(p *doc.Package, pi *PkgInfo, info types.Info) { + for _, v := range p.Vars { + for x, name := range v.Names { + if name != "Default" { + continue + } + spec := v.Decl.Specs[x].(*ast.ValueSpec) + if len(spec.Values) != 1 { + log.Println("warning: default declaration has multiple values") + } + id, ok := spec.Values[0].(*ast.Ident) + if !ok { + log.Println("warning: default declaration is not a function name") + } + for _, f := range pi.Funcs { + if f.Name == id.Name { + pi.DefaultName = f.Name + pi.DefaultIsError = f.IsError + pi.DefaultIsContext = f.IsContext + pi.DefaultFunc = f + return + } + } + log.Println("warning: default declaration does not reference a mage target") + } + } +} + +// getPackage returns the non-test package at the given path. 
+func getPackage(path string, files []string, fset *token.FileSet) (*ast.Package, error) { + fm := make(map[string]bool, len(files)) + for _, f := range files { + fm[f] = true + } + + filter := func(f os.FileInfo) bool { + return fm[f.Name()] + } + + pkgs, err := parser.ParseDir(fset, path, filter, parser.ParseComments) + if err != nil { + return nil, fmt.Errorf("failed to parse directory: %v", err) + } + + for name, pkg := range pkgs { + if !strings.HasSuffix(name, "_test") { + return pkg, nil + } + } + return nil, fmt.Errorf("no non-test packages found in %s", path) +} + +func makeInfo(dir string, fset *token.FileSet, files map[string]*ast.File) (types.Info, error) { + goroot := os.Getenv("GOROOT") + if goroot == "" { + c := exec.Command("go", "env", "GOROOT") + b, err := c.Output() + if err != nil { + return types.Info{}, fmt.Errorf("failed to get GOROOT from 'go env': %v", err) + } + goroot = strings.TrimSpace(string(b)) + if goroot == "" { + return types.Info{}, fmt.Errorf("could not determine GOROOT") + } + } + + build.Default.GOROOT = goroot + + cfg := types.Config{ + Importer: getImporter(fset), + } + + info := types.Info{ + Types: make(map[ast.Expr]types.TypeAndValue), + Defs: make(map[*ast.Ident]types.Object), + Uses: make(map[*ast.Ident]types.Object), + } + + fs := make([]*ast.File, 0, len(files)) + for _, v := range files { + fs = append(fs, v) + } + + _, err := cfg.Check(dir, fset, fs, &info) + if err != nil { + return info, fmt.Errorf("failed to check types in directory: %v", err) + } + return info, nil +} + +// errorOrVoid filters the list of functions to only those that return only an +// error or have no return value, and have no parameters. +func errorOrVoid(fns []*ast.FuncDecl, info types.Info) []*ast.FuncDecl { + fds := []*ast.FuncDecl{} + + for _, fn := range fns { + if voidOrError(fn.Type, info) != mgTypes.InvalidType { + fds = append(fds, fn) + } + } + return fds +} + +func hasContextParam(ft *ast.FuncType, info types.Info) bool { + if ft.Params.NumFields() == 1 { + ret := ft.Params.List[0] + t := info.TypeOf(ret.Type) + if t != nil && t.String() == "context.Context" { + return true + } + } + return false +} + +func hasVoidReturn(ft *ast.FuncType, info types.Info) bool { + res := ft.Results + if res.NumFields() == 0 { + return true + } + return false +} + +func hasErrorReturn(ft *ast.FuncType, info types.Info) bool { + res := ft.Results + if res.NumFields() == 1 { + ret := res.List[0] + if len(ret.Names) > 1 { + return false + } + t := info.TypeOf(ret.Type) + if t != nil && t.String() == "error" { + return true + } + } + return false +} + +func voidOrError(ft *ast.FuncType, info types.Info) mgTypes.FuncType { + if hasContextParam(ft, info) { + if hasVoidReturn(ft, info) { + return mgTypes.ContextVoidType + } + if hasErrorReturn(ft, info) { + return mgTypes.ContextErrorType + } + } + if ft.Params.NumFields() == 0 { + if hasVoidReturn(ft, info) { + return mgTypes.VoidType + } + if hasErrorReturn(ft, info) { + return mgTypes.ErrorType + } + } + return mgTypes.InvalidType +} diff --git a/vendor/github.com/magefile/mage/parse/parse_test.go b/vendor/github.com/magefile/mage/parse/parse_test.go new file mode 100644 index 0000000..3107a57 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/parse_test.go @@ -0,0 +1,58 @@ +package parse + +import ( + "reflect" + "testing" +) + +func TestParse(t *testing.T) { + info, err := Package("./testdata", []string{"func.go", "command.go"}) + if err != nil { + t.Fatal(err) + } + + expected := []Function{ + { + Name: 
"ReturnsNilError", + IsError: true, + Comment: "Synopsis for \"returns\" error.\nAnd some more text.\n", + Synopsis: `Synopsis for "returns" error.`, + }, + { + Name: "ReturnsVoid", + }, + { + Name: "TakesContextReturnsError", + IsError: true, + IsContext: true, + }, + { + Name: "TakesContextReturnsVoid", + IsError: false, + IsContext: true, + }, + } + + // DefaultIsError + if info.DefaultIsError != true { + t.Fatalf("expected DefaultIsError to be true") + } + + // DefaultName + if info.DefaultName != "ReturnsNilError" { + t.Fatalf("expected DefaultName to be ReturnsNilError") + } + + for _, fn := range expected { + found := false + for _, infoFn := range info.Funcs { + if reflect.DeepEqual(fn, infoFn) { + found = true + break + } + } + if !found { + t.Fatalf("expected:\n%#v\n\nto be in:\n%#v", fn, info.Funcs) + } + } +} diff --git a/vendor/github.com/magefile/mage/parse/srcimporter/sizes.go b/vendor/github.com/magefile/mage/parse/srcimporter/sizes.go new file mode 100644 index 0000000..a9e1b32 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/srcimporter/sizes.go @@ -0,0 +1,40 @@ +// +build !go1.9 + +package srcimporter + +import "go/types" + +// common architecture word sizes and alignments +var gcArchSizes = map[string]*types.StdSizes{ + "386": {4, 4}, + "arm": {4, 4}, + "arm64": {8, 8}, + "amd64": {8, 8}, + "amd64p32": {4, 8}, + "mips": {4, 4}, + "mipsle": {4, 4}, + "mips64": {8, 8}, + "mips64le": {8, 8}, + "ppc64": {8, 8}, + "ppc64le": {8, 8}, + "s390x": {8, 8}, + // When adding more architectures here, + // update the doc string of SizesFor below. +} + +// SizesFor returns the Sizes used by a compiler for an architecture. +// The result is nil if a compiler/architecture pair is not known. +// +// Supported architectures for compiler "gc": +// "386", "arm", "arm64", "amd64", "amd64p32", "mips", "mipsle", +// "mips64", "mips64le", "ppc64", "ppc64le", "s390x". +func SizesFor(compiler, arch string) types.Sizes { + if compiler != "gc" { + return nil + } + s, ok := gcArchSizes[arch] + if !ok { + return nil + } + return s +} diff --git a/vendor/github.com/magefile/mage/parse/srcimporter/srcimporter.go b/vendor/github.com/magefile/mage/parse/srcimporter/srcimporter.go new file mode 100644 index 0000000..a488a99 --- /dev/null +++ b/vendor/github.com/magefile/mage/parse/srcimporter/srcimporter.go @@ -0,0 +1,213 @@ +// +build !go1.9 + +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package srcimporter implements importing directly +// from source files rather than installed packages. +package srcimporter + +import ( + "fmt" + "go/ast" + "go/build" + "go/parser" + "go/token" + "go/types" + "path/filepath" + "sync" +) + +// An Importer provides the context for importing packages from source code. +type Importer struct { + ctxt *build.Context + fset *token.FileSet + sizes types.Sizes + packages map[string]*types.Package +} + +// NewImporter returns a new Importer for the given context, file set, and map +// of packages. The context is used to resolve import paths to package paths, +// and identifying the files belonging to the package. If the context provides +// non-nil file system functions, they are used instead of the regular package +// os functions. The file set is used to track position information of package +// files; and imported packages are added to the packages map. 
+func New(ctxt *build.Context, fset *token.FileSet, packages map[string]*types.Package) *Importer { + return &Importer{ + ctxt: ctxt, + fset: fset, + sizes: SizesFor(ctxt.Compiler, ctxt.GOARCH), // uses go/types default if GOARCH not found + packages: packages, + } +} + +// Importing is a sentinel taking the place in Importer.packages +// for a package that is in the process of being imported. +var importing types.Package + +// Import(path) is a shortcut for ImportFrom(path, "", 0). +func (p *Importer) Import(path string) (*types.Package, error) { + return p.ImportFrom(path, "", 0) +} + +// ImportFrom imports the package with the given import path resolved from the given srcDir, +// adds the new package to the set of packages maintained by the importer, and returns the +// package. Package path resolution and file system operations are controlled by the context +// maintained with the importer. The import mode must be zero but is otherwise ignored. +// Packages that are not comprised entirely of pure Go files may fail to import because the +// type checker may not be able to determine all exported entities (e.g. due to cgo dependencies). +func (p *Importer) ImportFrom(path, srcDir string, mode types.ImportMode) (*types.Package, error) { + if mode != 0 { + panic("non-zero import mode") + } + + // determine package path (do vendor resolution) + var bp *build.Package + var err error + switch { + default: + if abs, err := p.absPath(srcDir); err == nil { // see issue #14282 + srcDir = abs + } + bp, err = p.ctxt.Import(path, srcDir, build.FindOnly) + + case build.IsLocalImport(path): + // "./x" -> "srcDir/x" + bp, err = p.ctxt.ImportDir(filepath.Join(srcDir, path), build.FindOnly) + + case p.isAbsPath(path): + return nil, fmt.Errorf("invalid absolute import path %q", path) + } + if err != nil { + return nil, err // err may be *build.NoGoError - return as is + } + + // package unsafe is known to the type checker + if bp.ImportPath == "unsafe" { + return types.Unsafe, nil + } + + // no need to re-import if the package was imported completely before + pkg := p.packages[bp.ImportPath] + if pkg != nil { + if pkg == &importing { + return nil, fmt.Errorf("import cycle through package %q", bp.ImportPath) + } + if !pkg.Complete() { + // Package exists but is not complete - we cannot handle this + // at the moment since the source importer replaces the package + // wholesale rather than augmenting it (see #19337 for details). + // Return incomplete package with error (see #16088). + return pkg, fmt.Errorf("reimported partially imported package %q", bp.ImportPath) + } + return pkg, nil + } + + p.packages[bp.ImportPath] = &importing + defer func() { + // clean up in case of error + // TODO(gri) Eventually we may want to leave a (possibly empty) + // package in the map in all cases (and use that package to + // identify cycles). See also issue 16088. + if p.packages[bp.ImportPath] == &importing { + p.packages[bp.ImportPath] = nil + } + }() + + // collect package files + bp, err = p.ctxt.ImportDir(bp.Dir, 0) + if err != nil { + return nil, err // err may be *build.NoGoError - return as is + } + var filenames []string + filenames = append(filenames, bp.GoFiles...) + filenames = append(filenames, bp.CgoFiles...) 
+ + files, err := p.parseFiles(bp.Dir, filenames) + if err != nil { + return nil, err + } + + // type-check package files + conf := types.Config{ + IgnoreFuncBodies: true, + FakeImportC: true, + Importer: p, + Sizes: p.sizes, + } + pkg, err = conf.Check(bp.ImportPath, p.fset, files, nil) + if err != nil { + // Type-checking stops after the first error (types.Config.Error is not set), + // so the returned package is very likely incomplete. Don't return it since + // we don't know its condition: It's very likely unsafe to use and it's also + // not added to p.packages which may cause further problems (issue #20837). + return nil, fmt.Errorf("type-checking package %q failed (%v)", bp.ImportPath, err) + } + + p.packages[bp.ImportPath] = pkg + return pkg, nil +} + +func (p *Importer) parseFiles(dir string, filenames []string) ([]*ast.File, error) { + open := p.ctxt.OpenFile // possibly nil + + files := make([]*ast.File, len(filenames)) + errors := make([]error, len(filenames)) + + var wg sync.WaitGroup + wg.Add(len(filenames)) + for i, filename := range filenames { + go func(i int, filepath string) { + defer wg.Done() + if open != nil { + src, err := open(filepath) + if err != nil { + errors[i] = fmt.Errorf("opening package file %s failed (%v)", filepath, err) + return + } + files[i], errors[i] = parser.ParseFile(p.fset, filepath, src, 0) + src.Close() // ignore Close error - parsing may have succeeded which is all we need + } else { + // Special-case when ctxt doesn't provide a custom OpenFile and use the + // parser's file reading mechanism directly. This appears to be quite a + // bit faster than opening the file and providing an io.ReaderCloser in + // both cases. + // TODO(gri) investigate performance difference (issue #19281) + files[i], errors[i] = parser.ParseFile(p.fset, filepath, nil, 0) + } + }(i, p.joinPath(dir, filename)) + } + wg.Wait() + + // if there are errors, return the first one for deterministic results + for _, err := range errors { + if err != nil { + return nil, err + } + } + + return files, nil +} + +// context-controlled file system operations + +func (p *Importer) absPath(path string) (string, error) { + // TODO(gri) This should be using p.ctxt.AbsPath which doesn't + // exist but probably should. See also issue #14282. + return filepath.Abs(path) +} + +func (p *Importer) isAbsPath(path string) bool { + if f := p.ctxt.IsAbsPath; f != nil { + return f(path) + } + return filepath.IsAbs(path) +} + +func (p *Importer) joinPath(elem ...string) string { + if f := p.ctxt.JoinPath; f != nil { + return f(elem...) + } + return filepath.Join(elem...) +} diff --git a/vendor/github.com/magefile/mage/sh/cmd.go b/vendor/github.com/magefile/mage/sh/cmd.go new file mode 100644 index 0000000..23fc372 --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/cmd.go @@ -0,0 +1,165 @@ +package sh + +import ( + "bytes" + "fmt" + "io" + "log" + "os" + "os/exec" + "strings" + + "github.com/magefile/mage/mg" +) + +// RunCmd returns a function that will call Run with the given command. This is +// useful for creating command aliases to make your scripts easier to read, like +// this: +// +// // in a helper file somewhere +// var g0 = sh.RunCmd("go") // go is a keyword :( +// +// // somewhere in your main code +// if err := g0("install", "github.com/gohugo/hugo"); err != nil { +// return err +// } +// +// Args passed to command get baked in as args to the command when you run it. +// Any args passed in when you run the returned function will be appended to the +// original args. 
For example, this is equivalent to the above: +// +// var goInstall = sh.RunCmd("go", "install") +// goInstall("github.com/gohugo/hugo") +// +// RunCmd uses Exec underneath, so see those docs for more details. +func RunCmd(cmd string, args ...string) func(args ...string) error { + return func(args2 ...string) error { + return Run(cmd, append(args, args2...)...) + } +} + +// OutCmd is like RunCmd except the command returns the output of the +// command. +func OutCmd(cmd string, args ...string) func(args ...string) (string, error) { + return func(args2 ...string) (string, error) { + return Output(cmd, append(args, args2...)...) + } +} + +// Run is like RunWith, but doesn't specify any environment variables. +func Run(cmd string, args ...string) error { + return RunWith(nil, cmd, args...) +} + +// RunV is like Run, but always sends the command's stdout to os.Stdout. +func RunV(cmd string, args ...string) error { + _, err := Exec(nil, os.Stdout, os.Stderr, cmd, args...) + return err +} + +// RunWith runs the given command, directing stderr to this program's stderr and +// printing stdout to stdout if mage was run with -v. It adds env to the +// environment variables for the command being run. Environment variables should +// be in the format name=value. +func RunWith(env map[string]string, cmd string, args ...string) error { + var output io.Writer + if mg.Verbose() { + output = os.Stdout + } + _, err := Exec(env, output, os.Stderr, cmd, args...) + return err +} + +// Output runs the command and returns the text from stdout. +func Output(cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(nil, buf, os.Stderr, cmd, args...) + return strings.TrimSuffix(buf.String(), "\n"), err +} + +// OutputWith is like RunWith, but returns what is written to stdout. +func OutputWith(env map[string]string, cmd string, args ...string) (string, error) { + buf := &bytes.Buffer{} + _, err := Exec(env, buf, os.Stderr, cmd, args...) + return strings.TrimSuffix(buf.String(), "\n"), err +} + +// Exec executes the command, piping its stderr to mage's stderr and +// piping its stdout to the given writer. If the command fails, it will return +// an error that, if returned from a target or mg.Deps call, will cause mage to +// exit with the same code as the command failed with. Env is a map of +// environment variables to set when running the command; these override the +// current environment variables set (which are also passed to the command). cmd +// and args may include references to environment variables in $FOO format, in +// which case these will be expanded before the command is run. +// +// Ran reports if the command ran (rather than was not found or not executable). +// Code reports the exit code the command returned if it ran. If err == nil, ran +// is always true and code is always 0. +func Exec(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, err error) { + expand := func(s string) string { + s2, ok := env[s] + if ok { + return s2 + } + return os.Getenv(s) + } + cmd = os.Expand(cmd, expand) + for i := range args { + args[i] = os.Expand(args[i], expand) + } + ran, code, err := run(env, stdout, stderr, cmd, args...)
+ if err == nil { + return true, nil + } + if ran { + return ran, mg.Fatalf(code, `running "%s %s" failed with exit code %d`, cmd, strings.Join(args, " "), code) + } + return ran, fmt.Errorf(`failed to run "%s %s: %v"`, cmd, strings.Join(args, " "), err) +} + +func run(env map[string]string, stdout, stderr io.Writer, cmd string, args ...string) (ran bool, code int, err error) { + c := exec.Command(cmd, args...) + c.Env = os.Environ() + for k, v := range env { + c.Env = append(c.Env, k+"="+v) + } + c.Stderr = stderr + c.Stdout = stdout + c.Stdin = os.Stdin + log.Println("exec:", cmd, strings.Join(args, " ")) + err = c.Run() + return cmdRan(err), ExitStatus(err), err +} + +func cmdRan(err error) bool { + if err == nil { + return true + } + ee, ok := err.(*exec.ExitError) + if ok { + return ee.Exited() + } + return false +} + +type exitStatus interface { + ExitStatus() int +} + +// ExitStatus returns the exit status of the error if it is an exec.ExitError +// or if it implements ExitStatus() int. +// 0 if it is nil or 1 if it is a different error. +func ExitStatus(err error) int { + if err == nil { + return 0 + } + if e, ok := err.(exitStatus); ok { + return e.ExitStatus() + } + if e, ok := err.(*exec.ExitError); ok { + if ex, ok := e.Sys().(exitStatus); ok { + return ex.ExitStatus() + } + } + return 1 +} diff --git a/vendor/github.com/magefile/mage/sh/cmd_test.go b/vendor/github.com/magefile/mage/sh/cmd_test.go new file mode 100644 index 0000000..c2f5d04 --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/cmd_test.go @@ -0,0 +1,72 @@ +package sh + +import ( + "bytes" + "os" + "testing" +) + +func TestOutCmd(t *testing.T) { + cmd := OutCmd(os.Args[0], "-printArgs", "foo", "bar") + out, err := cmd("baz", "bat") + if err != nil { + t.Fatal(err) + } + expected := "[foo bar baz bat]" + if out != expected { + t.Fatalf("expected %q but got %q", expected, out) + } +} + +func TestExitCode(t *testing.T) { + ran, err := Exec(nil, nil, nil, os.Args[0], "-helper", "-exit", "99") + if err == nil { + t.Fatal("unexpected nil error from run") + } + if !ran { + t.Errorf("ran returned as false, but should have been true") + } + code := ExitStatus(err) + if code != 99 { + t.Fatalf("expected exit status 99, but got %v", code) + } +} + +func TestEnv(t *testing.T) { + env := "SOME_REALLY_LONG_MAGEFILE_SPECIFIC_THING" + out := &bytes.Buffer{} + ran, err := Exec(map[string]string{env: "foobar"}, out, nil, os.Args[0], "-printVar", env) + if err != nil { + t.Fatalf("unexpected error from runner: %#v", err) + } + if !ran { + t.Errorf("expected ran to be true but was false.") + } + if out.String() != "foobar\n" { + t.Errorf("expected foobar, got %q", out) + } +} + +func TestNotRun(t *testing.T) { + ran, err := Exec(nil, nil, nil, "thiswontwork") + if err == nil { + t.Fatal("unexpected nil error") + } + if ran { + t.Fatal("expected ran to be false but was true") + } +} + +func TestAutoExpand(t *testing.T) { + if err := os.Setenv("MAGE_FOOBAR", "baz"); err != nil { + t.Fatal(err) + } + s, err := Output("echo", "$MAGE_FOOBAR") + if err != nil { + t.Fatal(err) + } + if s != "baz" { + t.Fatalf(`Expected "baz" but got %q`, s) + } + +} diff --git a/vendor/github.com/magefile/mage/sh/helpers.go b/vendor/github.com/magefile/mage/sh/helpers.go new file mode 100644 index 0000000..86b075e --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/helpers.go @@ -0,0 +1,16 @@ +package sh + +import ( + "fmt" + "os" +) + +// Rm removes the given file or directory even if non-empty. 
It will not return +// an error if the target doesn't exist, only if the target cannot be removed. +func Rm(path string) error { + err := os.RemoveAll(path) + if err == nil || os.IsNotExist(err) { + return nil + } + return fmt.Errorf(`failed to remove %s: %v`, path, err) +} diff --git a/vendor/github.com/magefile/mage/sh/testmain_test.go b/vendor/github.com/magefile/mage/sh/testmain_test.go new file mode 100644 index 0000000..5869c54 --- /dev/null +++ b/vendor/github.com/magefile/mage/sh/testmain_test.go @@ -0,0 +1,46 @@ +package sh + +import ( + "flag" + "fmt" + "os" + "testing" +) + +var ( + helperCmd bool + printArgs bool + stderr string + stdout string + exitCode int + printVar string +) + +func init() { + flag.BoolVar(&helperCmd, "helper", false, "") + flag.BoolVar(&printArgs, "printArgs", false, "") + flag.StringVar(&stderr, "stderr", "", "") + flag.StringVar(&stdout, "stdout", "", "") + flag.IntVar(&exitCode, "exit", 0, "") + flag.StringVar(&printVar, "printVar", "", "") +} + +func TestMain(m *testing.M) { + flag.Parse() + + if printArgs { + fmt.Println(flag.Args()) + return + } + if printVar != "" { + fmt.Println(os.Getenv(printVar)) + return + } + + if helperCmd { + fmt.Fprintln(os.Stderr, stderr) + fmt.Fprintln(os.Stdout, stdout) + os.Exit(exitCode) + } + os.Exit(m.Run()) +} diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml index 5c14c13..d9deadb 100644 --- a/vendor/github.com/mitchellh/mapstructure/.travis.yml +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -1,7 +1,8 @@ -language: go +language: go + +go: + - 1.9.x + - tip -go: - - 1.8.1 - script: - - go test + - go test diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md index 659d688..7ecc785 100644 --- a/vendor/github.com/mitchellh/mapstructure/README.md +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -1,4 +1,4 @@ -# mapstructure +# mapstructure [![Godoc](https://godoc.org/github.com/mitchell/mapstructure?status.svg)](https://godoc.org/github.com/mitchell/mapstructure) mapstructure is a Go library for decoding generic map values to structures and vice versa, while providing helpful error handling. 
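For context, the mapstructure hunks below add support for decoding into fixed-size arrays (reflect.Array). A minimal sketch of what that enables follows; the Config type, its field names, and the input values are illustrative assumptions, not part of this diff:

package main

import (
	"fmt"
	"log"

	"github.com/mitchellh/mapstructure"
)

// Config is an illustrative target type; the [2]string field exercises the
// new array decoding path added below.
type Config struct {
	Name  string
	Hosts [2]string
}

func main() {
	input := map[string]interface{}{
		"name":  "example",
		"hosts": []string{"a.example.com", "b.example.com"},
	}

	var c Config
	// Decode copies the slice elements into the fixed-size array field.
	if err := mapstructure.Decode(input, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Println(c.Name, c.Hosts)
}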
diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go index 30a9957..39ec1e9 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -237,6 +237,8 @@ func (d *Decoder) decode(name string, data interface{}, val reflect.Value) error err = d.decodePtr(name, data, val) case reflect.Slice: err = d.decodeSlice(name, data, val) + case reflect.Array: + err = d.decodeArray(name, data, val) case reflect.Func: err = d.decodeFunc(name, data, val) default: @@ -292,12 +294,22 @@ func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) - case dataKind == reflect.Slice && d.config.WeaklyTypedInput: + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: dataType := dataVal.Type() elemKind := dataType.Elem().Kind() - switch { - case elemKind == reflect.Uint8: - val.SetString(string(dataVal.Interface().([]uint8))) + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) default: converted = false } @@ -647,6 +659,73 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) return nil } +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. 
+ valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { dataVal := reflect.Indirect(reflect.ValueOf(data)) @@ -716,7 +795,7 @@ func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) errors = appendErrors(errors, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) } else { - structs = append(structs, val.FieldByName(fieldType.Name)) + structs = append(structs, structVal.FieldByName(fieldType.Name)) } continue } diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go index 08e4956..ecfb769 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_bugs_test.go @@ -258,3 +258,21 @@ func TestDecodeSliceToEmptySliceWOZeroing(t *testing.T) { } } } + +// #70 +func TestNextSquashMapstructure(t *testing.T) { + data := &struct { + Level1 struct { + Level2 struct { + Foo string + } `mapstructure:",squash"` + } `mapstructure:",squash"` + }{} + err := Decode(map[interface{}]interface{}{"foo": "baz"}, &data) + if err != nil { + t.Fatalf("should not error: %s", err) + } + if data.Level1.Level2.Foo != "baz" { + t.Fatal("value should be baz") + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go index 547af73..89861ed 100644 --- a/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure_test.go @@ -49,6 +49,13 @@ type EmbeddedSlice struct { Vunique string } +type ArrayAlias [2]string + +type EmbeddedArray struct { + ArrayAlias `mapstructure:"array_alias"` + Vunique string +} + type SquashOnNonStructType struct { InvalidSquashType int `mapstructure:",squash"` } @@ -85,6 +92,15 @@ type SliceOfStruct struct { Value []Basic } +type Array struct { + Vfoo string + Vbar [2]string +} + +type ArrayOfStruct struct { + Value [2]Basic +} + type Func struct { Foo func() string } @@ -112,14 +128,19 @@ type TypeConversionResult struct { FloatToBool bool FloatToString string SliceUint8ToString string + ArrayUint8ToString string StringToInt int StringToUint uint StringToBool bool StringToFloat float32 StringToStrSlice []string StringToIntSlice []int + StringToStrArray [1]string + StringToIntArray [1]int SliceToMap map[string]interface{} MapToSlice []interface{} + ArrayToMap map[string]interface{} + MapToArray [1]interface{} } func TestBasicTypes(t *testing.T) { @@ -322,6 +343,29 @@ func TestDecode_EmbeddedSlice(t *testing.T) { } } +func TestDecode_EmbeddedArray(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "array_alias": [2]string{"foo", "bar"}, + "vunique": "bar", + } + + var result EmbeddedArray + err := Decode(input, &result) + if err != nil { + t.Fatalf("got an err: %s", err.Error()) + } + + if 
!reflect.DeepEqual(result.ArrayAlias, ArrayAlias([2]string{"foo", "bar"})) { + t.Errorf("array value: %#v", result.ArrayAlias) + } + + if result.Vunique != "bar" { + t.Errorf("vunique value should be 'bar': %#v", result.Vunique) + } +} + func TestDecode_EmbeddedSquash(t *testing.T) { t.Parallel() @@ -582,14 +626,19 @@ func TestDecode_TypeConversion(t *testing.T) { "FloatToBool": 42.42, "FloatToString": 42.42, "SliceUint8ToString": []uint8("foo"), + "ArrayUint8ToString": [3]uint8{'f', 'o', 'o'}, "StringToInt": "42", "StringToUint": "42", "StringToBool": "1", "StringToFloat": "42.42", "StringToStrSlice": "A", "StringToIntSlice": "42", + "StringToStrArray": "A", + "StringToIntArray": "42", "SliceToMap": []interface{}{}, "MapToSlice": map[string]interface{}{}, + "ArrayToMap": []interface{}{}, + "MapToArray": map[string]interface{}{}, } expectedResultStrict := TypeConversionResult{ @@ -622,14 +671,19 @@ func TestDecode_TypeConversion(t *testing.T) { FloatToBool: true, FloatToString: "42.42", SliceUint8ToString: "foo", + ArrayUint8ToString: "foo", StringToInt: 42, StringToUint: 42, StringToBool: true, StringToFloat: 42.42, StringToStrSlice: []string{"A"}, StringToIntSlice: []int{42}, + StringToStrArray: [1]string{"A"}, + StringToIntArray: [1]int{42}, SliceToMap: map[string]interface{}{}, MapToSlice: []interface{}{}, + ArrayToMap: map[string]interface{}{}, + MapToArray: [1]interface{}{}, } // Test strict type conversion @@ -965,6 +1019,99 @@ func TestSliceToMap(t *testing.T) { } } +func TestArray(t *testing.T) { + t.Parallel() + + inputStringArray := map[string]interface{}{ + "vfoo": "foo", + "vbar": [2]string{"foo", "bar"}, + } + + inputStringArrayPointer := map[string]interface{}{ + "vfoo": "foo", + "vbar": &[2]string{"foo", "bar"}, + } + + outputStringArray := &Array{ + "foo", + [2]string{"foo", "bar"}, + } + + testArrayInput(t, inputStringArray, outputStringArray) + testArrayInput(t, inputStringArrayPointer, outputStringArray) +} + +func TestInvalidArray(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "vfoo": "foo", + "vbar": 42, + } + + result := Array{} + err := Decode(input, &result) + if err == nil { + t.Errorf("expected failure") + } +} + +func TestArrayOfStruct(t *testing.T) { + t.Parallel() + + input := map[string]interface{}{ + "value": []map[string]interface{}{ + {"vstring": "one"}, + {"vstring": "two"}, + }, + } + + var result ArrayOfStruct + err := Decode(input, &result) + if err != nil { + t.Fatalf("got unexpected error: %s", err) + } + + if len(result.Value) != 2 { + t.Fatalf("expected two values, got %d", len(result.Value)) + } + + if result.Value[0].Vstring != "one" { + t.Errorf("first value should be 'one', got: %s", result.Value[0].Vstring) + } + + if result.Value[1].Vstring != "two" { + t.Errorf("second value should be 'two', got: %s", result.Value[1].Vstring) + } +} + +func TestArrayToMap(t *testing.T) { + t.Parallel() + + input := []map[string]interface{}{ + { + "foo": "bar", + }, + { + "bar": "baz", + }, + } + + var result map[string]interface{} + err := WeakDecode(input, &result) + if err != nil { + t.Fatalf("got an error: %s", err) + } + + expected := map[string]interface{}{ + "foo": "bar", + "bar": "baz", + } + if !reflect.DeepEqual(result, expected) { + t.Errorf("bad: %#v", result) + } +} + func TestInvalidType(t *testing.T) { t.Parallel() @@ -1191,3 +1338,31 @@ func testSliceInput(t *testing.T, input map[string]interface{}, expected *Slice) } } } + +func testArrayInput(t *testing.T, input map[string]interface{}, expected *Array) { + var result 
Array + err := Decode(input, &result) + if err != nil { + t.Fatalf("got error: %s", err) + } + + if result.Vfoo != expected.Vfoo { + t.Errorf("Vfoo expected '%s', got '%s'", expected.Vfoo, result.Vfoo) + } + + if result.Vbar == [2]string{} { + t.Fatalf("Vbar a slice, got '%#v'", result.Vbar) + } + + if len(result.Vbar) != len(expected.Vbar) { + t.Errorf("Vbar length should be %d, got %d", len(expected.Vbar), len(result.Vbar)) + } + + for i, v := range result.Vbar { + if v != expected.Vbar[i] { + t.Errorf( + "Vbar[%d] should be '%#v', got '%#v'", + i, expected.Vbar[i], v) + } + } +} diff --git a/vendor/github.com/olekukonko/tablewriter/README.md b/vendor/github.com/olekukonko/tablewriter/README.md index e9665d2..ae12208 100644 --- a/vendor/github.com/olekukonko/tablewriter/README.md +++ b/vendor/github.com/olekukonko/tablewriter/README.md @@ -1,7 +1,9 @@ ASCII Table Writer ========= -[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) [![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter) +[![Build Status](https://travis-ci.org/olekukonko/tablewriter.png?branch=master)](https://travis-ci.org/olekukonko/tablewriter) +[![Total views](https://img.shields.io/sourcegraph/rrc/github.com/olekukonko/tablewriter.svg)](https://sourcegraph.com/github.com/olekukonko/tablewriter) +[![Godoc](https://godoc.org/github.com/olekukonko/tablewriter?status.svg)](https://godoc.org/github.com/olekukonko/tablewriter) Generate ASCII table on the fly ... Installation is simple as diff --git a/vendor/github.com/olekukonko/tablewriter/table.go b/vendor/github.com/olekukonko/tablewriter/table.go index 4d07251..8b62c62 100644 --- a/vendor/github.com/olekukonko/tablewriter/table.go +++ b/vendor/github.com/olekukonko/tablewriter/table.go @@ -292,6 +292,11 @@ func (t *Table) AppendBulk(rows [][]string) { } } +// NumLines to get the number of lines +func (t *Table) NumLines() int { + return len(t.lines) +} + // Clear rows func (t *Table) ClearRows() { t.lines = [][][]string{} diff --git a/vendor/github.com/olekukonko/tablewriter/table_test.go b/vendor/github.com/olekukonko/tablewriter/table_test.go index 1a6022e..a8586bb 100644 --- a/vendor/github.com/olekukonko/tablewriter/table_test.go +++ b/vendor/github.com/olekukonko/tablewriter/table_test.go @@ -76,6 +76,31 @@ func ExampleCSV() { // *============*===========*=========* } +// TestNumLines to test the numbers of lines +func TestNumLines(t *testing.T) { + data := [][]string{ + []string{"A", "The Good", "500"}, + []string{"B", "The Very very Bad Man", "288"}, + []string{"C", "The Ugly", "120"}, + []string{"D", "The Gopher", "800"}, + } + + buf := &bytes.Buffer{} + table := NewWriter(buf) + table.SetHeader([]string{"Name", "Sign", "Rating"}) + + for i, v := range data { + table.Append(v) + if i+1 != table.NumLines() { + t.Errorf("Number of lines failed\ngot:\n[%d]\nwant:\n[%d]\n", table.NumLines(), i+1) + } + } + + if len(data) != table.NumLines() { + t.Errorf("Number of lines failed\ngot:\n[%d]\nwant:\n[%d]\n", table.NumLines(), len(data)) + } +} + func TestCSVInfo(t *testing.T) { buf := &bytes.Buffer{} table, err := NewCSV(buf, "test_info.csv", true) @@ -853,7 +878,7 @@ func TestCustomAlign(t *testing.T) { | A | B | CCCC | +-----+-----+-------+ ` - ) + ) table.SetHeader(header) table.SetFooter(footer) table.AppendBulk(data) diff --git a/vendor/github.com/satori/go.uuid/.travis.yml 
b/vendor/github.com/satori/go.uuid/.travis.yml index 38517e2..20dd53b 100644 --- a/vendor/github.com/satori/go.uuid/.travis.yml +++ b/vendor/github.com/satori/go.uuid/.travis.yml @@ -6,6 +6,14 @@ go: - 1.4 - 1.5 - 1.6 + - 1.7 + - 1.8 + - 1.9 + - tip +matrix: + allow_failures: + - go: tip + fast_finish: true before_install: - go get github.com/mattn/goveralls - go get golang.org/x/tools/cmd/cover diff --git a/vendor/github.com/satori/go.uuid/LICENSE b/vendor/github.com/satori/go.uuid/LICENSE index 488357b..926d549 100644 --- a/vendor/github.com/satori/go.uuid/LICENSE +++ b/vendor/github.com/satori/go.uuid/LICENSE @@ -1,4 +1,4 @@ -Copyright (C) 2013-2016 by Maxim Bublis +Copyright (C) 2013-2018 by Maxim Bublis Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the diff --git a/vendor/github.com/satori/go.uuid/README.md b/vendor/github.com/satori/go.uuid/README.md index b6aad1c..7b1a722 100644 --- a/vendor/github.com/satori/go.uuid/README.md +++ b/vendor/github.com/satori/go.uuid/README.md @@ -59,7 +59,7 @@ func main() { ## Copyright -Copyright (C) 2013-2016 by Maxim Bublis . +Copyright (C) 2013-2018 by Maxim Bublis . UUID package released under MIT License. See [LICENSE](https://github.com/satori/go.uuid/blob/master/LICENSE) for details. diff --git a/vendor/github.com/satori/go.uuid/benchmarks_test.go b/vendor/github.com/satori/go.uuid/benchmarks_test.go deleted file mode 100644 index b4e567f..0000000 --- a/vendor/github.com/satori/go.uuid/benchmarks_test.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (C) 2013-2015 by Maxim Bublis -// -// Permission is hereby granted, free of charge, to any person obtaining -// a copy of this software and associated documentation files (the -// "Software"), to deal in the Software without restriction, including -// without limitation the rights to use, copy, modify, merge, publish, -// distribute, sublicense, and/or sell copies of the Software, and to -// permit persons to whom the Software is furnished to do so, subject to -// the following conditions: -// -// The above copyright notice and this permission notice shall be -// included in all copies or substantial portions of the Software. -// -// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
- -package uuid - -import ( - "testing" -) - -func BenchmarkFromBytes(b *testing.B) { - bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - for i := 0; i < b.N; i++ { - FromBytes(bytes) - } -} - -func BenchmarkFromString(b *testing.B) { - s := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - for i := 0; i < b.N; i++ { - FromString(s) - } -} - -func BenchmarkFromStringUrn(b *testing.B) { - s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" - for i := 0; i < b.N; i++ { - FromString(s) - } -} - -func BenchmarkFromStringWithBrackets(b *testing.B) { - s := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" - for i := 0; i < b.N; i++ { - FromString(s) - } -} - -func BenchmarkNewV1(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV1() - } -} - -func BenchmarkNewV2(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV2(DomainPerson) - } -} - -func BenchmarkNewV3(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV3(NamespaceDNS, "www.example.com") - } -} - -func BenchmarkNewV4(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV4() - } -} - -func BenchmarkNewV5(b *testing.B) { - for i := 0; i < b.N; i++ { - NewV5(NamespaceDNS, "www.example.com") - } -} - -func BenchmarkMarshalBinary(b *testing.B) { - u := NewV4() - for i := 0; i < b.N; i++ { - u.MarshalBinary() - } -} - -func BenchmarkMarshalText(b *testing.B) { - u := NewV4() - for i := 0; i < b.N; i++ { - u.MarshalText() - } -} - -func BenchmarkUnmarshalBinary(b *testing.B) { - bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - u := UUID{} - for i := 0; i < b.N; i++ { - u.UnmarshalBinary(bytes) - } -} - -func BenchmarkUnmarshalText(b *testing.B) { - bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - u := UUID{} - for i := 0; i < b.N; i++ { - u.UnmarshalText(bytes) - } -} - -func BenchmarkMarshalToString(b *testing.B) { - u := NewV4() - for i := 0; i < b.N; i++ { - u.String() - } -} diff --git a/vendor/github.com/satori/go.uuid/codec.go b/vendor/github.com/satori/go.uuid/codec.go new file mode 100644 index 0000000..656892c --- /dev/null +++ b/vendor/github.com/satori/go.uuid/codec.go @@ -0,0 +1,206 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + "encoding/hex" + "fmt" +) + +// FromBytes returns UUID converted from raw byte slice input. +// It will return error if the slice isn't 16 bytes long. 
+func FromBytes(input []byte) (u UUID, err error) { + err = u.UnmarshalBinary(input) + return +} + +// FromBytesOrNil returns UUID converted from raw byte slice input. +// Same behavior as FromBytes, but returns a Nil UUID on error. +func FromBytesOrNil(input []byte) UUID { + uuid, err := FromBytes(input) + if err != nil { + return Nil + } + return uuid +} + +// FromString returns UUID parsed from string input. +// Input is expected in a form accepted by UnmarshalText. +func FromString(input string) (u UUID, err error) { + err = u.UnmarshalText([]byte(input)) + return +} + +// FromStringOrNil returns UUID parsed from string input. +// Same behavior as FromString, but returns a Nil UUID on error. +func FromStringOrNil(input string) UUID { + uuid, err := FromString(input) + if err != nil { + return Nil + } + return uuid +} + +// MarshalText implements the encoding.TextMarshaler interface. +// The encoding is the same as returned by String. +func (u UUID) MarshalText() (text []byte, err error) { + text = []byte(u.String()) + return +} + +// UnmarshalText implements the encoding.TextUnmarshaler interface. +// Following formats are supported: +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" +// "6ba7b8109dad11d180b400c04fd430c8" +// ABNF for supported UUID text representation follows: +// uuid := canonical | hashlike | braced | urn +// plain := canonical | hashlike +// canonical := 4hexoct '-' 2hexoct '-' 2hexoct '-' 6hexoct +// hashlike := 12hexoct +// braced := '{' plain '}' +// urn := URN ':' UUID-NID ':' plain +// URN := 'urn' +// UUID-NID := 'uuid' +// 12hexoct := 6hexoct 6hexoct +// 6hexoct := 4hexoct 2hexoct +// 4hexoct := 2hexoct 2hexoct +// 2hexoct := hexoct hexoct +// hexoct := hexdig hexdig +// hexdig := '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9' | +// 'a' | 'b' | 'c' | 'd' | 'e' | 'f' | +// 'A' | 'B' | 'C' | 'D' | 'E' | 'F' +func (u *UUID) UnmarshalText(text []byte) (err error) { + switch len(text) { + case 32: + return u.decodeHashLike(text) + case 36: + return u.decodeCanonical(text) + case 38: + return u.decodeBraced(text) + case 41: + fallthrough + case 45: + return u.decodeURN(text) + default: + return fmt.Errorf("uuid: incorrect UUID length: %s", text) + } +} + +// decodeCanonical decodes UUID string in format +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8". +func (u *UUID) decodeCanonical(t []byte) (err error) { + if t[8] != '-' || t[13] != '-' || t[18] != '-' || t[23] != '-' { + return fmt.Errorf("uuid: incorrect UUID format %s", t) + } + + src := t[:] + dst := u[:] + + for i, byteGroup := range byteGroups { + if i > 0 { + src = src[1:] // skip dash + } + _, err = hex.Decode(dst[:byteGroup/2], src[:byteGroup]) + if err != nil { + return + } + src = src[byteGroup:] + dst = dst[byteGroup/2:] + } + + return +} + +// decodeHashLike decodes UUID string in format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeHashLike(t []byte) (err error) { + src := t[:] + dst := u[:] + + if _, err = hex.Decode(dst, src); err != nil { + return err + } + return +} + +// decodeBraced decodes UUID string in format +// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" or in format +// "{6ba7b8109dad11d180b400c04fd430c8}". 
+func (u *UUID) decodeBraced(t []byte) (err error) { + l := len(t) + + if t[0] != '{' || t[l-1] != '}' { + return fmt.Errorf("uuid: incorrect UUID format %s", t) + } + + return u.decodePlain(t[1 : l-1]) +} + +// decodeURN decodes UUID string in format +// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in format +// "urn:uuid:6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodeURN(t []byte) (err error) { + total := len(t) + + urn_uuid_prefix := t[:9] + + if !bytes.Equal(urn_uuid_prefix, urnPrefix) { + return fmt.Errorf("uuid: incorrect UUID format: %s", t) + } + + return u.decodePlain(t[9:total]) +} + +// decodePlain decodes UUID string in canonical format +// "6ba7b810-9dad-11d1-80b4-00c04fd430c8" or in hash-like format +// "6ba7b8109dad11d180b400c04fd430c8". +func (u *UUID) decodePlain(t []byte) (err error) { + switch len(t) { + case 32: + return u.decodeHashLike(t) + case 36: + return u.decodeCanonical(t) + default: + return fmt.Errorf("uuid: incorrrect UUID length: %s", t) + } +} + +// MarshalBinary implements the encoding.BinaryMarshaler interface. +func (u UUID) MarshalBinary() (data []byte, err error) { + data = u.Bytes() + return +} + +// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. +// It will return error if the slice isn't 16 bytes long. +func (u *UUID) UnmarshalBinary(data []byte) (err error) { + if len(data) != Size { + err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) + return + } + copy(u[:], data) + + return +} diff --git a/vendor/github.com/satori/go.uuid/codec_test.go b/vendor/github.com/satori/go.uuid/codec_test.go new file mode 100644 index 0000000..101ec52 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/codec_test.go @@ -0,0 +1,248 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "bytes" + + . 
"gopkg.in/check.v1" +) + +type codecTestSuite struct{} + +var _ = Suite(&codecTestSuite{}) + +func (s *codecTestSuite) TestFromBytes(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1, err := FromBytes(b1) + c.Assert(err, IsNil) + c.Assert(u1, Equals, u) + + b2 := []byte{} + _, err = FromBytes(b2) + c.Assert(err, NotNil) +} + +func (s *codecTestSuite) BenchmarkFromBytes(c *C) { + bytes := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + for i := 0; i < c.N; i++ { + FromBytes(bytes) + } +} + +func (s *codecTestSuite) TestMarshalBinary(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + b2, err := u.MarshalBinary() + c.Assert(err, IsNil) + c.Assert(bytes.Equal(b1, b2), Equals, true) +} + +func (s *codecTestSuite) BenchmarkMarshalBinary(c *C) { + u := NewV4() + for i := 0; i < c.N; i++ { + u.MarshalBinary() + } +} + +func (s *codecTestSuite) TestUnmarshalBinary(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1 := UUID{} + err := u1.UnmarshalBinary(b1) + c.Assert(err, IsNil) + c.Assert(u1, Equals, u) + + b2 := []byte{} + u2 := UUID{} + err = u2.UnmarshalBinary(b2) + c.Assert(err, NotNil) +} + +func (s *codecTestSuite) TestFromString(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" + s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" + s4 := "6ba7b8109dad11d180b400c04fd430c8" + s5 := "urn:uuid:6ba7b8109dad11d180b400c04fd430c8" + + _, err := FromString("") + c.Assert(err, NotNil) + + u1, err := FromString(s1) + c.Assert(err, IsNil) + c.Assert(u1, Equals, u) + + u2, err := FromString(s2) + c.Assert(err, IsNil) + c.Assert(u2, Equals, u) + + u3, err := FromString(s3) + c.Assert(err, IsNil) + c.Assert(u3, Equals, u) + + u4, err := FromString(s4) + c.Assert(err, IsNil) + c.Assert(u4, Equals, u) + + u5, err := FromString(s5) + c.Assert(err, IsNil) + c.Assert(u5, Equals, u) +} + +func (s *codecTestSuite) BenchmarkFromString(c *C) { + str := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + for i := 0; i < c.N; i++ { + FromString(str) + } +} + +func (s *codecTestSuite) BenchmarkFromStringUrn(c *C) { + str := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" + for i := 0; i < c.N; i++ { + FromString(str) + } +} + +func (s *codecTestSuite) BenchmarkFromStringWithBrackets(c *C) { + str := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" + for i := 0; i < c.N; i++ { + FromString(str) + } +} + +func (s *codecTestSuite) TestFromStringShort(c *C) { + // Invalid 35-character UUID string + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c" + + for i := len(s1); i >= 0; i-- { + _, err := FromString(s1[:i]) + c.Assert(err, NotNil) + } +} + +func (s *codecTestSuite) TestFromStringLong(c *C) { + // Invalid 37+ character UUID string + strings := []string{ + "6ba7b810-9dad-11d1-80b4-00c04fd430c8=", + "6ba7b810-9dad-11d1-80b4-00c04fd430c8}", + "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}f", + 
"6ba7b810-9dad-11d1-80b4-00c04fd430c800c04fd430c8", + } + + for _, str := range strings { + _, err := FromString(str) + c.Assert(err, NotNil) + } +} + +func (s *codecTestSuite) TestFromStringInvalid(c *C) { + // Invalid UUID string formats + strings := []string{ + "6ba7b8109dad11d180b400c04fd430c86ba7b8109dad11d180b400c04fd430c8", + "urn:uuid:{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", + "uuid:urn:6ba7b810-9dad-11d1-80b4-00c04fd430c8", + "uuid:urn:6ba7b8109dad11d180b400c04fd430c8", + "6ba7b8109-dad-11d1-80b4-00c04fd430c8", + "6ba7b810-9dad1-1d1-80b4-00c04fd430c8", + "6ba7b810-9dad-11d18-0b4-00c04fd430c8", + "6ba7b810-9dad-11d1-80b40-0c04fd430c8", + "6ba7b810+9dad+11d1+80b4+00c04fd430c8", + "(6ba7b810-9dad-11d1-80b4-00c04fd430c8}", + "{6ba7b810-9dad-11d1-80b4-00c04fd430c8>", + "zba7b810-9dad-11d1-80b4-00c04fd430c8", + "6ba7b810-9dad11d180b400c04fd430c8", + "6ba7b8109dad-11d180b400c04fd430c8", + "6ba7b8109dad11d1-80b400c04fd430c8", + "6ba7b8109dad11d180b4-00c04fd430c8", + } + + for _, str := range strings { + _, err := FromString(str) + c.Assert(err, NotNil) + } +} + +func (s *codecTestSuite) TestFromStringOrNil(c *C) { + u := FromStringOrNil("") + c.Assert(u, Equals, Nil) +} + +func (s *codecTestSuite) TestFromBytesOrNil(c *C) { + b := []byte{} + u := FromBytesOrNil(b) + c.Assert(u, Equals, Nil) +} + +func (s *codecTestSuite) TestMarshalText(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + b2, err := u.MarshalText() + c.Assert(err, IsNil) + c.Assert(bytes.Equal(b1, b2), Equals, true) +} + +func (s *codecTestSuite) BenchmarkMarshalText(c *C) { + u := NewV4() + for i := 0; i < c.N; i++ { + u.MarshalText() + } +} + +func (s *codecTestSuite) TestUnmarshalText(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + u1 := UUID{} + err := u1.UnmarshalText(b1) + c.Assert(err, IsNil) + c.Assert(u1, Equals, u) + + b2 := []byte("") + u2 := UUID{} + err = u2.UnmarshalText(b2) + c.Assert(err, NotNil) +} + +func (s *codecTestSuite) BenchmarkUnmarshalText(c *C) { + bytes := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + u := UUID{} + for i := 0; i < c.N; i++ { + u.UnmarshalText(bytes) + } +} + +var sink string + +func (s *codecTestSuite) BenchmarkMarshalToString(c *C) { + u := NewV4() + for i := 0; i < c.N; i++ { + sink = u.String() + } +} diff --git a/vendor/github.com/satori/go.uuid/generator.go b/vendor/github.com/satori/go.uuid/generator.go new file mode 100644 index 0000000..3f2f1da --- /dev/null +++ b/vendor/github.com/satori/go.uuid/generator.go @@ -0,0 +1,239 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "crypto/md5" + "crypto/rand" + "crypto/sha1" + "encoding/binary" + "hash" + "net" + "os" + "sync" + "time" +) + +// Difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). +const epochStart = 122192928000000000 + +var ( + global = newDefaultGenerator() + + epochFunc = unixTimeFunc + posixUID = uint32(os.Getuid()) + posixGID = uint32(os.Getgid()) +) + +// NewV1 returns UUID based on current timestamp and MAC address. +func NewV1() UUID { + return global.NewV1() +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func NewV2(domain byte) UUID { + return global.NewV2(domain) +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func NewV3(ns UUID, name string) UUID { + return global.NewV3(ns, name) +} + +// NewV4 returns random generated UUID. +func NewV4() UUID { + return global.NewV4() +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. +func NewV5(ns UUID, name string) UUID { + return global.NewV5(ns, name) +} + +// Generator provides interface for generating UUIDs. +type Generator interface { + NewV1() UUID + NewV2(domain byte) UUID + NewV3(ns UUID, name string) UUID + NewV4() UUID + NewV5(ns UUID, name string) UUID +} + +// Default generator implementation. +type generator struct { + storageOnce sync.Once + storageMutex sync.Mutex + + lastTime uint64 + clockSequence uint16 + hardwareAddr [6]byte +} + +func newDefaultGenerator() Generator { + return &generator{} +} + +// NewV1 returns UUID based on current timestamp and MAC address. +func (g *generator) NewV1() UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := g.getStorage() + + binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + + copy(u[10:], hardwareAddr) + + u.SetVersion(V1) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV2 returns DCE Security UUID based on POSIX UID/GID. +func (g *generator) NewV2(domain byte) UUID { + u := UUID{} + + timeNow, clockSeq, hardwareAddr := g.getStorage() + + switch domain { + case DomainPerson: + binary.BigEndian.PutUint32(u[0:], posixUID) + case DomainGroup: + binary.BigEndian.PutUint32(u[0:], posixGID) + } + + binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) + binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) + binary.BigEndian.PutUint16(u[8:], clockSeq) + u[9] = domain + + copy(u[10:], hardwareAddr) + + u.SetVersion(V2) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV3 returns UUID based on MD5 hash of namespace UUID and name. +func (g *generator) NewV3(ns UUID, name string) UUID { + u := newFromHash(md5.New(), ns, name) + u.SetVersion(V3) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV4 returns random generated UUID. 
+func (g *generator) NewV4() UUID { + u := UUID{} + g.safeRandom(u[:]) + u.SetVersion(V4) + u.SetVariant(VariantRFC4122) + + return u +} + +// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. +func (g *generator) NewV5(ns UUID, name string) UUID { + u := newFromHash(sha1.New(), ns, name) + u.SetVersion(V5) + u.SetVariant(VariantRFC4122) + + return u +} + +func (g *generator) initStorage() { + g.initClockSequence() + g.initHardwareAddr() +} + +func (g *generator) initClockSequence() { + buf := make([]byte, 2) + g.safeRandom(buf) + g.clockSequence = binary.BigEndian.Uint16(buf) +} + +func (g *generator) initHardwareAddr() { + interfaces, err := net.Interfaces() + if err == nil { + for _, iface := range interfaces { + if len(iface.HardwareAddr) >= 6 { + copy(g.hardwareAddr[:], iface.HardwareAddr) + return + } + } + } + + // Initialize hardwareAddr randomly in case + // of real network interfaces absence + g.safeRandom(g.hardwareAddr[:]) + + // Set multicast bit as recommended in RFC 4122 + g.hardwareAddr[0] |= 0x01 +} + +func (g *generator) safeRandom(dest []byte) { + if _, err := rand.Read(dest); err != nil { + panic(err) + } +} + +// Returns UUID v1/v2 storage state. +// Returns epoch timestamp, clock sequence, and hardware address. +func (g *generator) getStorage() (uint64, uint16, []byte) { + g.storageOnce.Do(g.initStorage) + + g.storageMutex.Lock() + defer g.storageMutex.Unlock() + + timeNow := epochFunc() + // Clock changed backwards since last UUID generation. + // Should increase clock sequence. + if timeNow <= g.lastTime { + g.clockSequence++ + } + g.lastTime = timeNow + + return timeNow, g.clockSequence, g.hardwareAddr[:] +} + +// Returns difference in 100-nanosecond intervals between +// UUID epoch (October 15, 1582) and current time. +// This is default epoch calculation function. +func unixTimeFunc() uint64 { + return epochStart + uint64(time.Now().UnixNano()/100) +} + +// Returns UUID based on hashing of namespace UUID and name. +func newFromHash(h hash.Hash, ns UUID, name string) UUID { + u := UUID{} + h.Write(ns[:]) + h.Write([]byte(name)) + copy(u[:], h.Sum(nil)) + + return u +} diff --git a/vendor/github.com/satori/go.uuid/generator_test.go b/vendor/github.com/satori/go.uuid/generator_test.go new file mode 100644 index 0000000..cd69e2e --- /dev/null +++ b/vendor/github.com/satori/go.uuid/generator_test.go @@ -0,0 +1,134 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
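Editor's aside (not part of the vendored patch): a short sketch of the package-level generator API defined above; the expected v3/v5 outputs match the values asserted in generator_test.go below.

package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	// Time- and randomness-based UUIDs differ on every call.
	fmt.Println(uuid.NewV1()) // version 1: timestamp + MAC (or random fallback)
	fmt.Println(uuid.NewV4()) // version 4: crypto/rand

	// Name-based UUIDs are deterministic for a given namespace and name.
	fmt.Println(uuid.NewV3(uuid.NamespaceDNS, "www.example.com")) // 5df41881-3aed-3515-88a7-2f4a814cf09e
	fmt.Println(uuid.NewV5(uuid.NamespaceDNS, "python.org"))      // 886313e1-3b8a-5372-9b90-0c9aee199e5d
}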
+ +package uuid + +import ( + . "gopkg.in/check.v1" +) + +type genTestSuite struct{} + +var _ = Suite(&genTestSuite{}) + +func (s *genTestSuite) TestNewV1(c *C) { + u := NewV1() + c.Assert(u.Version(), Equals, V1) + c.Assert(u.Variant(), Equals, VariantRFC4122) + + u1 := NewV1() + u2 := NewV1() + c.Assert(u1, Not(Equals), u2) + + oldFunc := epochFunc + epochFunc = func() uint64 { return 0 } + + u3 := NewV1() + u4 := NewV1() + c.Assert(u3, Not(Equals), u4) + + epochFunc = oldFunc +} + +func (s *genTestSuite) BenchmarkNewV1(c *C) { + for i := 0; i < c.N; i++ { + NewV1() + } +} + +func (s *genTestSuite) TestNewV2(c *C) { + u1 := NewV2(DomainPerson) + c.Assert(u1.Version(), Equals, V2) + c.Assert(u1.Variant(), Equals, VariantRFC4122) + + u2 := NewV2(DomainGroup) + c.Assert(u2.Version(), Equals, V2) + c.Assert(u2.Variant(), Equals, VariantRFC4122) +} + +func (s *genTestSuite) BenchmarkNewV2(c *C) { + for i := 0; i < c.N; i++ { + NewV2(DomainPerson) + } +} + +func (s *genTestSuite) TestNewV3(c *C) { + u := NewV3(NamespaceDNS, "www.example.com") + c.Assert(u.Version(), Equals, V3) + c.Assert(u.Variant(), Equals, VariantRFC4122) + c.Assert(u.String(), Equals, "5df41881-3aed-3515-88a7-2f4a814cf09e") + + u = NewV3(NamespaceDNS, "python.org") + c.Assert(u.String(), Equals, "6fa459ea-ee8a-3ca4-894e-db77e160355e") + + u1 := NewV3(NamespaceDNS, "golang.org") + u2 := NewV3(NamespaceDNS, "golang.org") + c.Assert(u1, Equals, u2) + + u3 := NewV3(NamespaceDNS, "example.com") + c.Assert(u1, Not(Equals), u3) + + u4 := NewV3(NamespaceURL, "golang.org") + c.Assert(u1, Not(Equals), u4) +} + +func (s *genTestSuite) BenchmarkNewV3(c *C) { + for i := 0; i < c.N; i++ { + NewV3(NamespaceDNS, "www.example.com") + } +} + +func (s *genTestSuite) TestNewV4(c *C) { + u := NewV4() + c.Assert(u.Version(), Equals, V4) + c.Assert(u.Variant(), Equals, VariantRFC4122) +} + +func (s *genTestSuite) BenchmarkNewV4(c *C) { + for i := 0; i < c.N; i++ { + NewV4() + } +} + +func (s *genTestSuite) TestNewV5(c *C) { + u := NewV5(NamespaceDNS, "www.example.com") + c.Assert(u.Version(), Equals, V5) + c.Assert(u.Variant(), Equals, VariantRFC4122) + + u = NewV5(NamespaceDNS, "python.org") + c.Assert(u.String(), Equals, "886313e1-3b8a-5372-9b90-0c9aee199e5d") + + u1 := NewV5(NamespaceDNS, "golang.org") + u2 := NewV5(NamespaceDNS, "golang.org") + c.Assert(u1, Equals, u2) + + u3 := NewV5(NamespaceDNS, "example.com") + c.Assert(u1, Not(Equals), u3) + + u4 := NewV5(NamespaceURL, "golang.org") + c.Assert(u1, Not(Equals), u4) +} + +func (s *genTestSuite) BenchmarkNewV5(c *C) { + for i := 0; i < c.N; i++ { + NewV5(NamespaceDNS, "www.example.com") + } +} diff --git a/vendor/github.com/satori/go.uuid/sql.go b/vendor/github.com/satori/go.uuid/sql.go new file mode 100644 index 0000000..56759d3 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/sql.go @@ -0,0 +1,78 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. 
+// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + "database/sql/driver" + "fmt" +) + +// Value implements the driver.Valuer interface. +func (u UUID) Value() (driver.Value, error) { + return u.String(), nil +} + +// Scan implements the sql.Scanner interface. +// A 16-byte slice is handled by UnmarshalBinary, while +// a longer byte slice or a string is handled by UnmarshalText. +func (u *UUID) Scan(src interface{}) error { + switch src := src.(type) { + case []byte: + if len(src) == Size { + return u.UnmarshalBinary(src) + } + return u.UnmarshalText(src) + + case string: + return u.UnmarshalText([]byte(src)) + } + + return fmt.Errorf("uuid: cannot convert %T to UUID", src) +} + +// NullUUID can be used with the standard sql package to represent a +// UUID value that can be NULL in the database +type NullUUID struct { + UUID UUID + Valid bool +} + +// Value implements the driver.Valuer interface. +func (u NullUUID) Value() (driver.Value, error) { + if !u.Valid { + return nil, nil + } + // Delegate to UUID Value function + return u.UUID.Value() +} + +// Scan implements the sql.Scanner interface. +func (u *NullUUID) Scan(src interface{}) error { + if src == nil { + u.UUID, u.Valid = Nil, false + return nil + } + + // Delegate to UUID Scan function + u.Valid = true + return u.UUID.Scan(src) +} diff --git a/vendor/github.com/satori/go.uuid/sql_test.go b/vendor/github.com/satori/go.uuid/sql_test.go new file mode 100644 index 0000000..74255f5 --- /dev/null +++ b/vendor/github.com/satori/go.uuid/sql_test.go @@ -0,0 +1,136 @@ +// Copyright (C) 2013-2018 by Maxim Bublis +// +// Permission is hereby granted, free of charge, to any person obtaining +// a copy of this software and associated documentation files (the +// "Software"), to deal in the Software without restriction, including +// without limitation the rights to use, copy, modify, merge, publish, +// distribute, sublicense, and/or sell copies of the Software, and to +// permit persons to whom the Software is furnished to do so, subject to +// the following conditions: +// +// The above copyright notice and this permission notice shall be +// included in all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +// EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +// MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +// NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +// LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +// OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +// WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +package uuid + +import ( + . 
"gopkg.in/check.v1" +) + +type sqlTestSuite struct{} + +var _ = Suite(&sqlTestSuite{}) + +func (s *sqlTestSuite) TestValue(c *C) { + u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + c.Assert(err, IsNil) + + val, err := u.Value() + c.Assert(err, IsNil) + c.Assert(val, Equals, u.String()) +} + +func (s *sqlTestSuite) TestValueNil(c *C) { + u := UUID{} + + val, err := u.Value() + c.Assert(err, IsNil) + c.Assert(val, Equals, Nil.String()) +} + +func (s *sqlTestSuite) TestNullUUIDValueNil(c *C) { + u := NullUUID{} + + val, err := u.Value() + c.Assert(err, IsNil) + c.Assert(val, IsNil) +} + +func (s *sqlTestSuite) TestScanBinary(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + u1 := UUID{} + err := u1.Scan(b1) + c.Assert(err, IsNil) + c.Assert(u, Equals, u1) + + b2 := []byte{} + u2 := UUID{} + + err = u2.Scan(b2) + c.Assert(err, NotNil) +} + +func (s *sqlTestSuite) TestScanString(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + u1 := UUID{} + err := u1.Scan(s1) + c.Assert(err, IsNil) + c.Assert(u, Equals, u1) + + s2 := "" + u2 := UUID{} + + err = u2.Scan(s2) + c.Assert(err, NotNil) +} + +func (s *sqlTestSuite) TestScanText(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + + u1 := UUID{} + err := u1.Scan(b1) + c.Assert(err, IsNil) + c.Assert(u, Equals, u1) + + b2 := []byte("") + u2 := UUID{} + err = u2.Scan(b2) + c.Assert(err, NotNil) +} + +func (s *sqlTestSuite) TestScanUnsupported(c *C) { + u := UUID{} + + err := u.Scan(true) + c.Assert(err, NotNil) +} + +func (s *sqlTestSuite) TestScanNil(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + + err := u.Scan(nil) + c.Assert(err, NotNil) +} + +func (s *sqlTestSuite) TestNullUUIDScanValid(c *C) { + u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} + s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" + + u1 := NullUUID{} + err := u1.Scan(s1) + c.Assert(err, IsNil) + c.Assert(u1.Valid, Equals, true) + c.Assert(u1.UUID, Equals, u) +} + +func (s *sqlTestSuite) TestNullUUIDScanNil(c *C) { + u := NullUUID{UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}, true} + + err := u.Scan(nil) + c.Assert(err, IsNil) + c.Assert(u.Valid, Equals, false) + c.Assert(u.UUID, Equals, Nil) +} diff --git a/vendor/github.com/satori/go.uuid/uuid.go b/vendor/github.com/satori/go.uuid/uuid.go index 9c7fbaa..a2b8e2c 100644 --- a/vendor/github.com/satori/go.uuid/uuid.go +++ b/vendor/github.com/satori/go.uuid/uuid.go @@ -1,4 +1,4 @@ -// Copyright (C) 2013-2015 by Maxim Bublis +// Copyright (C) 2013-2018 by Maxim Bublis // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the @@ -26,23 +26,29 @@ package uuid import ( "bytes" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "database/sql/driver" - "encoding/binary" "encoding/hex" - "fmt" - "hash" - "net" - "os" - "sync" - "time" +) + +// Size of a UUID in bytes. 
+const Size = 16 + +// UUID representation compliant with specification +// described in RFC 4122. +type UUID [Size]byte + +// UUID versions +const ( + _ byte = iota + V1 + V2 + V3 + V4 + V5 ) // UUID layout variants. const ( - VariantNCS = iota + VariantNCS byte = iota VariantRFC4122 VariantMicrosoft VariantFuture @@ -55,136 +61,48 @@ const ( DomainOrg ) -// Difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and Unix epoch (January 1, 1970). -const epochStart = 122192928000000000 - -// Used in string method conversion -const dash byte = '-' - -// UUID v1/v2 storage. -var ( - storageMutex sync.Mutex - storageOnce sync.Once - epochFunc = unixTimeFunc - clockSequence uint16 - lastTime uint64 - hardwareAddr [6]byte - posixUID = uint32(os.Getuid()) - posixGID = uint32(os.Getgid()) -) - // String parse helpers. var ( urnPrefix = []byte("urn:uuid:") byteGroups = []int{8, 4, 4, 4, 12} ) -func initClockSequence() { - buf := make([]byte, 2) - safeRandom(buf) - clockSequence = binary.BigEndian.Uint16(buf) -} - -func initHardwareAddr() { - interfaces, err := net.Interfaces() - if err == nil { - for _, iface := range interfaces { - if len(iface.HardwareAddr) >= 6 { - copy(hardwareAddr[:], iface.HardwareAddr) - return - } - } - } - - // Initialize hardwareAddr randomly in case - // of real network interfaces absence - safeRandom(hardwareAddr[:]) - - // Set multicast bit as recommended in RFC 4122 - hardwareAddr[0] |= 0x01 -} - -func initStorage() { - initClockSequence() - initHardwareAddr() -} - -func safeRandom(dest []byte) { - if _, err := rand.Read(dest); err != nil { - panic(err) - } -} - -// Returns difference in 100-nanosecond intervals between -// UUID epoch (October 15, 1582) and current time. -// This is default epoch calculation function. -func unixTimeFunc() uint64 { - return epochStart + uint64(time.Now().UnixNano()/100) -} - -// UUID representation compliant with specification -// described in RFC 4122. -type UUID [16]byte - -// NullUUID can be used with the standard sql package to represent a -// UUID value that can be NULL in the database -type NullUUID struct { - UUID UUID - Valid bool -} - -// The nil UUID is special form of UUID that is specified to have all +// Nil is special form of UUID that is specified to have all // 128 bits set to zero. var Nil = UUID{} // Predefined namespace UUIDs. var ( - NamespaceDNS, _ = FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - NamespaceURL, _ = FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8") - NamespaceOID, _ = FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8") - NamespaceX500, _ = FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NamespaceDNS = Must(FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")) + NamespaceURL = Must(FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8")) + NamespaceOID = Must(FromString("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) + NamespaceX500 = Must(FromString("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) ) -// And returns result of binary AND of two UUIDs. -func And(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] & u2[i] - } - return u -} - -// Or returns result of binary OR of two UUIDs. -func Or(u1 UUID, u2 UUID) UUID { - u := UUID{} - for i := 0; i < 16; i++ { - u[i] = u1[i] | u2[i] - } - return u -} - // Equal returns true if u1 and u2 equals, otherwise returns false. func Equal(u1 UUID, u2 UUID) bool { return bytes.Equal(u1[:], u2[:]) } // Version returns algorithm version used to generate UUID. 
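Editor's aside (not part of the vendored patch): a hedged sketch exercising the driver.Valuer/sql.Scanner plumbing from sql.go above directly, without a real database driver; variable names are illustrative.

package main

import (
	"fmt"
	"log"

	uuid "github.com/satori/go.uuid"
)

func main() {
	u, err := uuid.FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if err != nil {
		log.Fatal(err)
	}

	// Value() is what database/sql hands to the driver: the canonical string.
	v, err := u.Value()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("driver value: %v\n", v)

	// Scan() accepts a 16-byte slice, a text form, or a string.
	var got uuid.UUID
	if err := got.Scan("6ba7b810-9dad-11d1-80b4-00c04fd430c8"); err != nil {
		log.Fatal(err)
	}

	// NullUUID round-trips database NULLs: Scan(nil) clears Valid.
	var n uuid.NullUUID
	_ = n.Scan(nil) // always returns nil for a nil source
	fmt.Println(n.Valid) // false
}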
-func (u UUID) Version() uint { - return uint(u[6] >> 4) +func (u UUID) Version() byte { + return u[6] >> 4 } // Variant returns UUID layout variant. -func (u UUID) Variant() uint { +func (u UUID) Variant() byte { switch { - case (u[8] & 0x80) == 0x00: + case (u[8] >> 7) == 0x00: return VariantNCS - case (u[8]&0xc0)|0x80 == 0x80: + case (u[8] >> 6) == 0x02: return VariantRFC4122 - case (u[8]&0xe0)|0xc0 == 0xc0: + case (u[8] >> 5) == 0x06: return VariantMicrosoft + case (u[8] >> 5) == 0x07: + fallthrough + default: + return VariantFuture } - return VariantFuture } // Bytes returns bytes slice representation of UUID. @@ -198,13 +116,13 @@ func (u UUID) String() string { buf := make([]byte, 36) hex.Encode(buf[0:8], u[0:4]) - buf[8] = dash + buf[8] = '-' hex.Encode(buf[9:13], u[4:6]) - buf[13] = dash + buf[13] = '-' hex.Encode(buf[14:18], u[6:8]) - buf[18] = dash + buf[18] = '-' hex.Encode(buf[19:23], u[8:10]) - buf[23] = dash + buf[23] = '-' hex.Encode(buf[24:], u[10:]) return string(buf) @@ -215,274 +133,29 @@ func (u *UUID) SetVersion(v byte) { u[6] = (u[6] & 0x0f) | (v << 4) } -// SetVariant sets variant bits as described in RFC 4122. -func (u *UUID) SetVariant() { - u[8] = (u[8] & 0xbf) | 0x80 -} - -// MarshalText implements the encoding.TextMarshaler interface. -// The encoding is the same as returned by String. -func (u UUID) MarshalText() (text []byte, err error) { - text = []byte(u.String()) - return -} - -// UnmarshalText implements the encoding.TextUnmarshaler interface. -// Following formats are supported: -// "6ba7b810-9dad-11d1-80b4-00c04fd430c8", -// "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", -// "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" -func (u *UUID) UnmarshalText(text []byte) (err error) { - if len(text) < 32 { - err = fmt.Errorf("uuid: UUID string too short: %s", text) - return +// SetVariant sets variant bits. +func (u *UUID) SetVariant(v byte) { + switch v { + case VariantNCS: + u[8] = (u[8]&(0xff>>1) | (0x00 << 7)) + case VariantRFC4122: + u[8] = (u[8]&(0xff>>2) | (0x02 << 6)) + case VariantMicrosoft: + u[8] = (u[8]&(0xff>>3) | (0x06 << 5)) + case VariantFuture: + fallthrough + default: + u[8] = (u[8]&(0xff>>3) | (0x07 << 5)) } - - t := text[:] - braced := false - - if bytes.Equal(t[:9], urnPrefix) { - t = t[9:] - } else if t[0] == '{' { - braced = true - t = t[1:] - } - - b := u[:] - - for i, byteGroup := range byteGroups { - if i > 0 && t[0] == '-' { - t = t[1:] - } else if i > 0 && t[0] != '-' { - err = fmt.Errorf("uuid: invalid string format") - return - } - - if i == 2 { - if !bytes.Contains([]byte("012345"), []byte{t[0]}) { - err = fmt.Errorf("uuid: invalid version number: %s", t[0]) - return - } - } - - if len(t) < byteGroup { - err = fmt.Errorf("uuid: UUID string too short: %s", text) - return - } - - if i == 4 && len(t) > byteGroup && - ((braced && t[byteGroup] != '}') || len(t[byteGroup:]) > 1 || !braced) { - err = fmt.Errorf("uuid: UUID string too long: %s", t) - return - } - - _, err = hex.Decode(b[:byteGroup/2], t[:byteGroup]) - - if err != nil { - return - } - - t = t[byteGroup:] - b = b[byteGroup/2:] - } - - return } -// MarshalBinary implements the encoding.BinaryMarshaler interface. -func (u UUID) MarshalBinary() (data []byte, err error) { - data = u.Bytes() - return -} - -// UnmarshalBinary implements the encoding.BinaryUnmarshaler interface. -// It will return error if the slice isn't 16 bytes long. 
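Editor's aside (not part of the vendored patch): a small sketch of the new byte-typed version/variant accessors above; the printed canonical form is what the zero UUID becomes after setting version 4 and the RFC 4122 variant.

package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

func main() {
	var u uuid.UUID // zero value: all 128 bits clear

	u.SetVersion(uuid.V4)             // version lives in the high nibble of byte 6
	u.SetVariant(uuid.VariantRFC4122) // variant lives in the top bits of byte 8

	fmt.Println(u.Version() == uuid.V4)             // true
	fmt.Println(u.Variant() == uuid.VariantRFC4122) // true
	fmt.Println(u)                                  // 00000000-0000-4000-8000-000000000000
}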
-func (u *UUID) UnmarshalBinary(data []byte) (err error) { - if len(data) != 16 { - err = fmt.Errorf("uuid: UUID must be exactly 16 bytes long, got %d bytes", len(data)) - return - } - copy(u[:], data) - - return -} - -// Value implements the driver.Valuer interface. -func (u UUID) Value() (driver.Value, error) { - return u.String(), nil -} - -// Scan implements the sql.Scanner interface. -// A 16-byte slice is handled by UnmarshalBinary, while -// a longer byte slice or a string is handled by UnmarshalText. -func (u *UUID) Scan(src interface{}) error { - switch src := src.(type) { - case []byte: - if len(src) == 16 { - return u.UnmarshalBinary(src) - } - return u.UnmarshalText(src) - - case string: - return u.UnmarshalText([]byte(src)) - } - - return fmt.Errorf("uuid: cannot convert %T to UUID", src) -} - -// Value implements the driver.Valuer interface. -func (u NullUUID) Value() (driver.Value, error) { - if !u.Valid { - return nil, nil - } - // Delegate to UUID Value function - return u.UUID.Value() -} - -// Scan implements the sql.Scanner interface. -func (u *NullUUID) Scan(src interface{}) error { - if src == nil { - u.UUID, u.Valid = Nil, false - return nil - } - - // Delegate to UUID Scan function - u.Valid = true - return u.UUID.Scan(src) -} - -// FromBytes returns UUID converted from raw byte slice input. -// It will return error if the slice isn't 16 bytes long. -func FromBytes(input []byte) (u UUID, err error) { - err = u.UnmarshalBinary(input) - return -} - -// FromBytesOrNil returns UUID converted from raw byte slice input. -// Same behavior as FromBytes, but returns a Nil UUID on error. -func FromBytesOrNil(input []byte) UUID { - uuid, err := FromBytes(input) +// Must is a helper that wraps a call to a function returning (UUID, error) +// and panics if the error is non-nil. It is intended for use in variable +// initializations such as +// var packageUUID = uuid.Must(uuid.FromString("123e4567-e89b-12d3-a456-426655440000")); +func Must(u UUID, err error) UUID { if err != nil { - return Nil + panic(err) } - return uuid -} - -// FromString returns UUID parsed from string input. -// Input is expected in a form accepted by UnmarshalText. -func FromString(input string) (u UUID, err error) { - err = u.UnmarshalText([]byte(input)) - return -} - -// FromStringOrNil returns UUID parsed from string input. -// Same behavior as FromString, but returns a Nil UUID on error. -func FromStringOrNil(input string) UUID { - uuid, err := FromString(input) - if err != nil { - return Nil - } - return uuid -} - -// Returns UUID v1/v2 storage state. -// Returns epoch timestamp, clock sequence, and hardware address. -func getStorage() (uint64, uint16, []byte) { - storageOnce.Do(initStorage) - - storageMutex.Lock() - defer storageMutex.Unlock() - - timeNow := epochFunc() - // Clock changed backwards since last UUID generation. - // Should increase clock sequence. - if timeNow <= lastTime { - clockSequence++ - } - lastTime = timeNow - - return timeNow, clockSequence, hardwareAddr[:] -} - -// NewV1 returns UUID based on current timestamp and MAC address. -func NewV1() UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - binary.BigEndian.PutUint32(u[0:], uint32(timeNow)) - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - - copy(u[10:], hardwareAddr) - - u.SetVersion(1) - u.SetVariant() - - return u -} - -// NewV2 returns DCE Security UUID based on POSIX UID/GID. 
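Editor's aside (not part of the vendored patch): the Must helper above is what now backs the predefined namespaces; a sketch of the intended package-level initialization, with appNamespace and the worker name as hypothetical examples.

package main

import (
	"fmt"

	uuid "github.com/satori/go.uuid"
)

// appNamespace is a hypothetical package-level constant; Must panics at
// program start if the literal is malformed, instead of silently falling
// back to the Nil UUID as the old "NamespaceDNS, _ = FromString(...)"
// pattern did.
var appNamespace = uuid.Must(uuid.FromString("6ba7b811-9dad-11d1-80b4-00c04fd430c8"))

func main() {
	// Deterministic IDs derived from the namespace (name is illustrative).
	fmt.Println(uuid.NewV5(appNamespace, "worker-1"))
}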
-func NewV2(domain byte) UUID { - u := UUID{} - - timeNow, clockSeq, hardwareAddr := getStorage() - - switch domain { - case DomainPerson: - binary.BigEndian.PutUint32(u[0:], posixUID) - case DomainGroup: - binary.BigEndian.PutUint32(u[0:], posixGID) - } - - binary.BigEndian.PutUint16(u[4:], uint16(timeNow>>32)) - binary.BigEndian.PutUint16(u[6:], uint16(timeNow>>48)) - binary.BigEndian.PutUint16(u[8:], clockSeq) - u[9] = domain - - copy(u[10:], hardwareAddr) - - u.SetVersion(2) - u.SetVariant() - - return u -} - -// NewV3 returns UUID based on MD5 hash of namespace UUID and name. -func NewV3(ns UUID, name string) UUID { - u := newFromHash(md5.New(), ns, name) - u.SetVersion(3) - u.SetVariant() - - return u -} - -// NewV4 returns random generated UUID. -func NewV4() UUID { - u := UUID{} - safeRandom(u[:]) - u.SetVersion(4) - u.SetVariant() - - return u -} - -// NewV5 returns UUID based on SHA-1 hash of namespace UUID and name. -func NewV5(ns UUID, name string) UUID { - u := newFromHash(sha1.New(), ns, name) - u.SetVersion(5) - u.SetVariant() - - return u -} - -// Returns UUID based on hashing of namespace UUID and name. -func newFromHash(h hash.Hash, ns UUID, name string) UUID { - u := UUID{} - h.Write(ns[:]) - h.Write([]byte(name)) - copy(u[:], h.Sum(nil)) - return u } diff --git a/vendor/github.com/satori/go.uuid/uuid_test.go b/vendor/github.com/satori/go.uuid/uuid_test.go index aa68ac9..beb336d 100644 --- a/vendor/github.com/satori/go.uuid/uuid_test.go +++ b/vendor/github.com/satori/go.uuid/uuid_test.go @@ -1,4 +1,4 @@ -// Copyright (C) 2013, 2015 by Maxim Bublis +// Copyright (C) 2013-2018 by Maxim Bublis // // Permission is hereby granted, free of charge, to any person obtaining // a copy of this software and associated documentation files (the @@ -24,610 +24,67 @@ package uuid import ( "bytes" "testing" + + . "gopkg.in/check.v1" ) -func TestBytes(t *testing.T) { +// Hook up gocheck into the "go test" runner. 
+func TestUUID(t *testing.T) { TestingT(t) } + +type testSuite struct{} + +var _ = Suite(&testSuite{}) + +func (s *testSuite) TestBytes(c *C) { u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} bytes1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - if !bytes.Equal(u.Bytes(), bytes1) { - t.Errorf("Incorrect bytes representation for UUID: %s", u) - } + c.Assert(bytes.Equal(u.Bytes(), bytes1), Equals, true) } -func TestString(t *testing.T) { - if NamespaceDNS.String() != "6ba7b810-9dad-11d1-80b4-00c04fd430c8" { - t.Errorf("Incorrect string representation for UUID: %s", NamespaceDNS.String()) - } +func (s *testSuite) TestString(c *C) { + c.Assert(NamespaceDNS.String(), Equals, "6ba7b810-9dad-11d1-80b4-00c04fd430c8") } -func TestEqual(t *testing.T) { - if !Equal(NamespaceDNS, NamespaceDNS) { - t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceDNS) - } - - if Equal(NamespaceDNS, NamespaceURL) { - t.Errorf("Incorrect comparison of %s and %s", NamespaceDNS, NamespaceURL) - } +func (s *testSuite) TestEqual(c *C) { + c.Assert(Equal(NamespaceDNS, NamespaceDNS), Equals, true) + c.Assert(Equal(NamespaceDNS, NamespaceURL), Equals, false) } -func TestOr(t *testing.T) { - u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} - u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} - - u := UUID{0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff} - - if !Equal(u, Or(u1, u2)) { - t.Errorf("Incorrect bitwise OR result %s", Or(u1, u2)) - } -} - -func TestAnd(t *testing.T) { - u1 := UUID{0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff} - u2 := UUID{0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00, 0xff, 0x00} - - u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if !Equal(u, And(u1, u2)) { - t.Errorf("Incorrect bitwise AND result %s", And(u1, u2)) - } -} - -func TestVersion(t *testing.T) { +func (s *testSuite) TestVersion(c *C) { u := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u.Version() != 1 { - t.Errorf("Incorrect version for UUID: %d", u.Version()) - } + c.Assert(u.Version(), Equals, V1) } -func TestSetVersion(t *testing.T) { +func (s *testSuite) TestSetVersion(c *C) { u := UUID{} u.SetVersion(4) - - if u.Version() != 4 { - t.Errorf("Incorrect version for UUID after u.setVersion(4): %d", u.Version()) - } + c.Assert(u.Version(), Equals, V4) } -func TestVariant(t *testing.T) { +func (s *testSuite) TestVariant(c *C) { u1 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u1.Variant() != VariantNCS { - t.Errorf("Incorrect variant for UUID variant %d: %d", VariantNCS, u1.Variant()) - } + c.Assert(u1.Variant(), Equals, VariantNCS) u2 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u2.Variant() != VariantRFC4122 { - t.Errorf("Incorrect variant for UUID variant %d: %d", VariantRFC4122, u2.Variant()) - } + c.Assert(u2.Variant(), Equals, VariantRFC4122) u3 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u3.Variant() != VariantMicrosoft { - 
t.Errorf("Incorrect variant for UUID variant %d: %d", VariantMicrosoft, u3.Variant()) - } + c.Assert(u3.Variant(), Equals, VariantMicrosoft) u4 := UUID{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00} - - if u4.Variant() != VariantFuture { - t.Errorf("Incorrect variant for UUID variant %d: %d", VariantFuture, u4.Variant()) - } + c.Assert(u4.Variant(), Equals, VariantFuture) } -func TestSetVariant(t *testing.T) { - u := new(UUID) - u.SetVariant() - - if u.Variant() != VariantRFC4122 { - t.Errorf("Incorrect variant for UUID after u.setVariant(): %d", u.Variant()) - } -} - -func TestFromBytes(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - u1, err := FromBytes(b1) - if err != nil { - t.Errorf("Error parsing UUID from bytes: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte{} - - _, err = FromBytes(b2) - if err == nil { - t.Errorf("Should return error parsing from empty byte slice, got %s", err) - } -} - -func TestMarshalBinary(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - b2, err := u.MarshalBinary() - if err != nil { - t.Errorf("Error marshaling UUID: %s", err) - } - - if !bytes.Equal(b1, b2) { - t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) - } -} - -func TestUnmarshalBinary(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - u1 := UUID{} - err := u1.UnmarshalBinary(b1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte{} - u2 := UUID{} - - err = u2.UnmarshalBinary(b2) - if err == nil { - t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) - } -} - -func TestFromString(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - s2 := "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}" - s3 := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8" - - _, err := FromString("") - if err == nil { - t.Errorf("Should return error trying to parse empty string, got %s", err) - } - - u1, err := FromString(s1) - if err != nil { - t.Errorf("Error parsing UUID from string: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - u2, err := FromString(s2) - if err != nil { - t.Errorf("Error parsing UUID from string: %s", err) - } - - if !Equal(u, u2) { - t.Errorf("UUIDs should be equal: %s and %s", u, u2) - } - - u3, err := FromString(s3) - if err != nil { - t.Errorf("Error parsing UUID from string: %s", err) - } - - if !Equal(u, u3) { - t.Errorf("UUIDs should be equal: %s and %s", u, u3) - } -} - -func TestFromStringShort(t *testing.T) { - // Invalid 35-character UUID string - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c" - - for i := len(s1); i >= 0; i-- { - _, err := FromString(s1[:i]) - if err == nil { - t.Errorf("Should return error trying to 
parse too short string, got %s", err) - } - } -} - -func TestFromStringLong(t *testing.T) { - // Invalid 37+ character UUID string - s := []string{ - "6ba7b810-9dad-11d1-80b4-00c04fd430c8=", - "6ba7b810-9dad-11d1-80b4-00c04fd430c8}", - "{6ba7b810-9dad-11d1-80b4-00c04fd430c8}f", - "6ba7b810-9dad-11d1-80b4-00c04fd430c800c04fd430c8", - } - - for _, str := range s { - _, err := FromString(str) - if err == nil { - t.Errorf("Should return error trying to parse too long string, passed %s", str) - } - } -} - -func TestFromStringInvalid(t *testing.T) { - // Invalid UUID string formats - s := []string{ - "6ba7b8109dad11d180b400c04fd430c8", - "6ba7b8109dad11d180b400c04fd430c86ba7b8109dad11d180b400c04fd430c8", - "urn:uuid:{6ba7b810-9dad-11d1-80b4-00c04fd430c8}", - "6ba7b8109-dad-11d1-80b4-00c04fd430c8", - "6ba7b810-9dad1-1d1-80b4-00c04fd430c8", - "6ba7b810-9dad-11d18-0b4-00c04fd430c8", - "6ba7b810-9dad-11d1-80b40-0c04fd430c8", - "6ba7b810+9dad+11d1+80b4+00c04fd430c8", - "6ba7b810-9dad11d180b400c04fd430c8", - "6ba7b8109dad-11d180b400c04fd430c8", - "6ba7b8109dad11d1-80b400c04fd430c8", - "6ba7b8109dad11d180b4-00c04fd430c8", - } - - for _, str := range s { - _, err := FromString(str) - if err == nil { - t.Errorf("Should return error trying to parse invalid string, passed %s", str) - } - } -} - -func TestFromStringOrNil(t *testing.T) { - u := FromStringOrNil("") - if u != Nil { - t.Errorf("Should return Nil UUID on parse failure, got %s", u) - } -} - -func TestFromBytesOrNil(t *testing.T) { - b := []byte{} - u := FromBytesOrNil(b) - if u != Nil { - t.Errorf("Should return Nil UUID on parse failure, got %s", u) - } -} - -func TestMarshalText(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - - b2, err := u.MarshalText() - if err != nil { - t.Errorf("Error marshaling UUID: %s", err) - } - - if !bytes.Equal(b1, b2) { - t.Errorf("Marshaled UUID should be %s, got %s", b1, b2) - } -} - -func TestUnmarshalText(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - - u1 := UUID{} - err := u1.UnmarshalText(b1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte("") - u2 := UUID{} - - err = u2.UnmarshalText(b2) - if err == nil { - t.Errorf("Should return error trying to unmarshal from empty string") - } -} - -func TestValue(t *testing.T) { - u, err := FromString("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - if err != nil { - t.Errorf("Error parsing UUID from string: %s", err) - } - - val, err := u.Value() - if err != nil { - t.Errorf("Error getting UUID value: %s", err) - } - - if val != u.String() { - t.Errorf("Wrong value returned, should be equal: %s and %s", val, u) - } -} - -func TestValueNil(t *testing.T) { +func (s *testSuite) TestSetVariant(c *C) { u := UUID{} - - val, err := u.Value() - if err != nil { - t.Errorf("Error getting UUID value: %s", err) - } - - if val != Nil.String() { - t.Errorf("Wrong value returned, should be equal to UUID.Nil: %s", val) - } -} - -func TestNullUUIDValueNil(t *testing.T) { - u := NullUUID{} - - val, err := u.Value() - if err != nil { - t.Errorf("Error getting UUID value: %s", err) - } - - if val != nil { - t.Errorf("Wrong value returned, should be nil: %s", val) - } -} - -func TestScanBinary(t *testing.T) { - u := 
UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - u1 := UUID{} - err := u1.Scan(b1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte{} - u2 := UUID{} - - err = u2.Scan(b2) - if err == nil { - t.Errorf("Should return error unmarshalling from empty byte slice, got %s", err) - } -} - -func TestScanString(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - - u1 := UUID{} - err := u1.Scan(s1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - s2 := "" - u2 := UUID{} - - err = u2.Scan(s2) - if err == nil { - t.Errorf("Should return error trying to unmarshal from empty string") - } -} - -func TestScanText(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - b1 := []byte("6ba7b810-9dad-11d1-80b4-00c04fd430c8") - - u1 := UUID{} - err := u1.Scan(b1) - if err != nil { - t.Errorf("Error unmarshaling UUID: %s", err) - } - - if !Equal(u, u1) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1) - } - - b2 := []byte("") - u2 := UUID{} - - err = u2.Scan(b2) - if err == nil { - t.Errorf("Should return error trying to unmarshal from empty string") - } -} - -func TestScanUnsupported(t *testing.T) { - u := UUID{} - - err := u.Scan(true) - if err == nil { - t.Errorf("Should return error trying to unmarshal from bool") - } -} - -func TestScanNil(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - - err := u.Scan(nil) - if err == nil { - t.Errorf("Error UUID shouldn't allow unmarshalling from nil") - } -} - -func TestNullUUIDScanValid(t *testing.T) { - u := UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8} - s1 := "6ba7b810-9dad-11d1-80b4-00c04fd430c8" - - u1 := NullUUID{} - err := u1.Scan(s1) - if err != nil { - t.Errorf("Error unmarshaling NullUUID: %s", err) - } - - if !u1.Valid { - t.Errorf("NullUUID should be valid") - } - - if !Equal(u, u1.UUID) { - t.Errorf("UUIDs should be equal: %s and %s", u, u1.UUID) - } -} - -func TestNullUUIDScanNil(t *testing.T) { - u := NullUUID{UUID{0x6b, 0xa7, 0xb8, 0x10, 0x9d, 0xad, 0x11, 0xd1, 0x80, 0xb4, 0x00, 0xc0, 0x4f, 0xd4, 0x30, 0xc8}, true} - - err := u.Scan(nil) - if err != nil { - t.Errorf("Error unmarshaling NullUUID: %s", err) - } - - if u.Valid { - t.Errorf("NullUUID should not be valid") - } - - if !Equal(u.UUID, Nil) { - t.Errorf("NullUUID value should be equal to Nil: %s", u) - } -} - -func TestNewV1(t *testing.T) { - u := NewV1() - - if u.Version() != 1 { - t.Errorf("UUIDv1 generated with incorrect version: %d", u.Version()) - } - - if u.Variant() != VariantRFC4122 { - t.Errorf("UUIDv1 generated with incorrect variant: %d", u.Variant()) - } - - u1 := NewV1() - u2 := NewV1() - - if Equal(u1, u2) { - t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u1, u2) - } - - oldFunc := epochFunc - epochFunc = func() uint64 { return 0 } - - u3 := NewV1() - u4 := NewV1() - - if Equal(u3, u4) { - t.Errorf("UUIDv1 generated two equal UUIDs: %s and %s", u3, u4) - } - - 
epochFunc = oldFunc -} - -func TestNewV2(t *testing.T) { - u1 := NewV2(DomainPerson) - - if u1.Version() != 2 { - t.Errorf("UUIDv2 generated with incorrect version: %d", u1.Version()) - } - - if u1.Variant() != VariantRFC4122 { - t.Errorf("UUIDv2 generated with incorrect variant: %d", u1.Variant()) - } - - u2 := NewV2(DomainGroup) - - if u2.Version() != 2 { - t.Errorf("UUIDv2 generated with incorrect version: %d", u2.Version()) - } - - if u2.Variant() != VariantRFC4122 { - t.Errorf("UUIDv2 generated with incorrect variant: %d", u2.Variant()) - } -} - -func TestNewV3(t *testing.T) { - u := NewV3(NamespaceDNS, "www.example.com") - - if u.Version() != 3 { - t.Errorf("UUIDv3 generated with incorrect version: %d", u.Version()) - } - - if u.Variant() != VariantRFC4122 { - t.Errorf("UUIDv3 generated with incorrect variant: %d", u.Variant()) - } - - if u.String() != "5df41881-3aed-3515-88a7-2f4a814cf09e" { - t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) - } - - u = NewV3(NamespaceDNS, "python.org") - - if u.String() != "6fa459ea-ee8a-3ca4-894e-db77e160355e" { - t.Errorf("UUIDv3 generated incorrectly: %s", u.String()) - } - - u1 := NewV3(NamespaceDNS, "golang.org") - u2 := NewV3(NamespaceDNS, "golang.org") - if !Equal(u1, u2) { - t.Errorf("UUIDv3 generated different UUIDs for same namespace and name: %s and %s", u1, u2) - } - - u3 := NewV3(NamespaceDNS, "example.com") - if Equal(u1, u3) { - t.Errorf("UUIDv3 generated same UUIDs for different names in same namespace: %s and %s", u1, u2) - } - - u4 := NewV3(NamespaceURL, "golang.org") - if Equal(u1, u4) { - t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4) - } -} - -func TestNewV4(t *testing.T) { - u := NewV4() - - if u.Version() != 4 { - t.Errorf("UUIDv4 generated with incorrect version: %d", u.Version()) - } - - if u.Variant() != VariantRFC4122 { - t.Errorf("UUIDv4 generated with incorrect variant: %d", u.Variant()) - } -} - -func TestNewV5(t *testing.T) { - u := NewV5(NamespaceDNS, "www.example.com") - - if u.Version() != 5 { - t.Errorf("UUIDv5 generated with incorrect version: %d", u.Version()) - } - - if u.Variant() != VariantRFC4122 { - t.Errorf("UUIDv5 generated with incorrect variant: %d", u.Variant()) - } - - u = NewV5(NamespaceDNS, "python.org") - - if u.String() != "886313e1-3b8a-5372-9b90-0c9aee199e5d" { - t.Errorf("UUIDv5 generated incorrectly: %s", u.String()) - } - - u1 := NewV5(NamespaceDNS, "golang.org") - u2 := NewV5(NamespaceDNS, "golang.org") - if !Equal(u1, u2) { - t.Errorf("UUIDv5 generated different UUIDs for same namespace and name: %s and %s", u1, u2) - } - - u3 := NewV5(NamespaceDNS, "example.com") - if Equal(u1, u3) { - t.Errorf("UUIDv5 generated same UUIDs for different names in same namespace: %s and %s", u1, u2) - } - - u4 := NewV5(NamespaceURL, "golang.org") - if Equal(u1, u4) { - t.Errorf("UUIDv3 generated same UUIDs for sane names in different namespaces: %s and %s", u1, u4) - } + u.SetVariant(VariantNCS) + c.Assert(u.Variant(), Equals, VariantNCS) + u.SetVariant(VariantRFC4122) + c.Assert(u.Variant(), Equals, VariantRFC4122) + u.SetVariant(VariantMicrosoft) + c.Assert(u.Variant(), Equals, VariantMicrosoft) + u.SetVariant(VariantFuture) + c.Assert(u.Variant(), Equals, VariantFuture) } diff --git a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go index a6c4d51..e1e220a 100644 --- a/vendor/github.com/zclconf/go-cty/cty/types_to_register.go +++ 
b/vendor/github.com/zclconf/go-cty/cty/types_to_register.go @@ -2,7 +2,9 @@ package cty import ( "encoding/gob" + "fmt" "math/big" + "strings" "github.com/zclconf/go-cty/cty/set" ) @@ -45,6 +47,11 @@ func init() { // Register these with gob here, rather than in gob.go, to ensure // that this will always happen after we build the above. for _, tv := range InternalTypesToRegister { - gob.Register(tv) + typeName := fmt.Sprintf("%T", tv) + if strings.HasPrefix(typeName, "cty.") { + gob.RegisterName(fmt.Sprintf("github.com/zclconf/go-cty/%s", typeName), tv) + } else { + gob.Register(tv) + } } } diff --git a/vendor/github.com/zclconf/go-cty/cty/value_ops.go b/vendor/github.com/zclconf/go-cty/cty/value_ops.go index 766e38f..8d7b126 100644 --- a/vendor/github.com/zclconf/go-cty/cty/value_ops.go +++ b/vendor/github.com/zclconf/go-cty/cty/value_ops.go @@ -737,6 +737,30 @@ func (val Value) HasIndex(key Value) Value { } } +// HasElement returns True if the receiver (which must be of a set type) +// has the given value as an element, or False if it does not. +// +// The result will be UnknownVal(Bool) if either the set or the +// given value are unknown. +// +// This method will panic if the receiver is not a set, or if it is a null set. +func (val Value) HasElement(elem Value) Value { + ty := val.Type() + + if !ty.IsSetType() { + panic("not a set type") + } + if !val.IsKnown() || !elem.IsKnown() { + return UnknownVal(Bool) + } + if val.IsNull() { + panic("can't call HasElement on a nil value") + } + + s := val.v.(set.Set) + return BoolVal(s.Has(elem.v)) +} + // Length returns the length of the receiver, which must be a collection type // or tuple type, as a number value. If the receiver is not a compatible type // then this method will panic. diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert.go b/vendor/golang.org/x/crypto/acme/autocert/autocert.go index 94edba9..453e722 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/autocert.go +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert.go @@ -24,7 +24,9 @@ import ( "fmt" "io" mathrand "math/rand" + "net" "net/http" + "path" "strconv" "strings" "sync" @@ -80,8 +82,9 @@ func defaultHostPolicy(context.Context, string) error { } // Manager is a stateful certificate manager built on top of acme.Client. -// It obtains and refreshes certificates automatically, -// as well as providing them to a TLS server via tls.Config. +// It obtains and refreshes certificates automatically using "tls-sni-01", +// "tls-sni-02" and "http-01" challenge types, as well as providing them +// to a TLS server via tls.Config. // // You must specify a cache implementation, such as DirCache, // to reuse obtained certificates across program restarts. @@ -150,15 +153,26 @@ type Manager struct { stateMu sync.Mutex state map[string]*certState // keyed by domain name - // tokenCert is keyed by token domain name, which matches server name - // of ClientHello. Keys always have ".acme.invalid" suffix. - tokenCertMu sync.RWMutex - tokenCert map[string]*tls.Certificate - // renewal tracks the set of domains currently running renewal timers. // It is keyed by domain name. renewalMu sync.Mutex renewal map[string]*domainRenewal + + // tokensMu guards the rest of the fields: tryHTTP01, certTokens and httpTokens. + tokensMu sync.RWMutex + // tryHTTP01 indicates whether the Manager should try "http-01" challenge type + // during the authorization flow. 
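+ // It is set to true by HTTPHandler; once enabled, verify also attempts the
+ // "http-01" challenge in addition to the TLS-SNI challenges.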
+ tryHTTP01 bool + // httpTokens contains response body values for http-01 challenges + // and is keyed by the URL path at which a challenge response is expected + // to be provisioned. + // The entries are stored for the duration of the authorization flow. + httpTokens map[string][]byte + // certTokens contains temporary certificates for tls-sni challenges + // and is keyed by token domain name, which matches server name of ClientHello. + // Keys always have ".acme.invalid" suffix. + // The entries are stored for the duration of the authorization flow. + certTokens map[string]*tls.Certificate } // GetCertificate implements the tls.Config.GetCertificate hook. @@ -185,14 +199,16 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, return nil, errors.New("acme/autocert: server name contains invalid character") } + // In the worst-case scenario, the timeout needs to account for caching, host policy, + // domain ownership verification and certificate issuance. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute) defer cancel() // check whether this is a token cert requested for TLS-SNI challenge if strings.HasSuffix(name, ".acme.invalid") { - m.tokenCertMu.RLock() - defer m.tokenCertMu.RUnlock() - if cert := m.tokenCert[name]; cert != nil { + m.tokensMu.RLock() + defer m.tokensMu.RUnlock() + if cert := m.certTokens[name]; cert != nil { return cert, nil } if cert, err := m.cacheGet(ctx, name); err == nil { @@ -224,6 +240,68 @@ func (m *Manager) GetCertificate(hello *tls.ClientHelloInfo) (*tls.Certificate, return cert, nil } +// HTTPHandler configures the Manager to provision ACME "http-01" challenge responses. +// It returns an http.Handler that responds to the challenges and must be +// running on port 80. If it receives a request that is not an ACME challenge, +// it delegates the request to the optional fallback handler. +// +// If fallback is nil, the returned handler redirects all GET and HEAD requests +// to the default TLS port 443 with 302 Found status code, preserving the original +// request path and query. It responds with 400 Bad Request to all other HTTP methods. +// The fallback is not protected by the optional HostPolicy. +// +// Because the fallback handler is run with unencrypted port 80 requests, +// the fallback should not serve TLS-only requests. +// +// If HTTPHandler is never called, the Manager will only use TLS SNI +// challenges for domain verification. +func (m *Manager) HTTPHandler(fallback http.Handler) http.Handler { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + m.tryHTTP01 = true + + if fallback == nil { + fallback = http.HandlerFunc(handleHTTPRedirect) + } + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if !strings.HasPrefix(r.URL.Path, "/.well-known/acme-challenge/") { + fallback.ServeHTTP(w, r) + return + } + // A reasonable context timeout for cache and host policy only, + // because we don't wait for a new certificate issuance here. 
+ ctx, cancel := context.WithTimeout(r.Context(), time.Minute) + defer cancel() + if err := m.hostPolicy()(ctx, r.Host); err != nil { + http.Error(w, err.Error(), http.StatusForbidden) + return + } + data, err := m.httpToken(ctx, r.URL.Path) + if err != nil { + http.Error(w, err.Error(), http.StatusNotFound) + return + } + w.Write(data) + }) +} + +func handleHTTPRedirect(w http.ResponseWriter, r *http.Request) { + if r.Method != "GET" && r.Method != "HEAD" { + http.Error(w, "Use HTTPS", http.StatusBadRequest) + return + } + target := "https://" + stripPort(r.Host) + r.URL.RequestURI() + http.Redirect(w, r, target, http.StatusFound) +} + +func stripPort(hostport string) string { + host, _, err := net.SplitHostPort(hostport) + if err != nil { + return hostport + } + return net.JoinHostPort(host, "443") +} + // cert returns an existing certificate either from m.state or cache. // If a certificate is found in cache but not in m.state, the latter will be filled // with the cached value. @@ -442,13 +520,14 @@ func (m *Manager) certState(domain string) (*certState, error) { // authorizedCert starts the domain ownership verification process and requests a new cert upon success. // The key argument is the certificate private key. func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain string) (der [][]byte, leaf *x509.Certificate, err error) { - if err := m.verify(ctx, domain); err != nil { - return nil, nil, err - } client, err := m.acmeClient(ctx) if err != nil { return nil, nil, err } + + if err := m.verify(ctx, client, domain); err != nil { + return nil, nil, err + } csr, err := certRequest(key, domain) if err != nil { return nil, nil, err @@ -464,98 +543,171 @@ func (m *Manager) authorizedCert(ctx context.Context, key crypto.Signer, domain return der, leaf, nil } -// verify starts a new identifier (domain) authorization flow. -// It prepares a challenge response and then blocks until the authorization -// is marked as "completed" by the CA (either succeeded or failed). -// -// verify returns nil iff the verification was successful. -func (m *Manager) verify(ctx context.Context, domain string) error { - client, err := m.acmeClient(ctx) - if err != nil { - return err +// verify runs the identifier (domain) authorization flow +// using each applicable ACME challenge type. +func (m *Manager) verify(ctx context.Context, client *acme.Client, domain string) error { + // The list of challenge types we'll try to fulfill + // in this specific order. + challengeTypes := []string{"tls-sni-02", "tls-sni-01"} + m.tokensMu.RLock() + if m.tryHTTP01 { + challengeTypes = append(challengeTypes, "http-01") } + m.tokensMu.RUnlock() - // start domain authorization and get the challenge - authz, err := client.Authorize(ctx, domain) - if err != nil { - return err - } - // maybe don't need to at all - if authz.Status == acme.StatusValid { - return nil - } - - // pick a challenge: prefer tls-sni-02 over tls-sni-01 - // TODO: consider authz.Combinations - var chal *acme.Challenge - for _, c := range authz.Challenges { - if c.Type == "tls-sni-02" { - chal = c - break + var nextTyp int // challengeType index of the next challenge type to try + for { + // Start domain authorization and get the challenge. + authz, err := client.Authorize(ctx, domain) + if err != nil { + return err } - if c.Type == "tls-sni-01" { - chal = c + // No point in accepting challenges if the authorization status + // is in a final state. 
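+ // A "valid" authorization can be reused as-is, while an "invalid" one is
+ // terminal for this flow, so verification fails immediately.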
+ switch authz.Status { + case acme.StatusValid: + return nil // already authorized + case acme.StatusInvalid: + return fmt.Errorf("acme/autocert: invalid authorization %q", authz.URI) + } + + // Pick the next preferred challenge. + var chal *acme.Challenge + for chal == nil && nextTyp < len(challengeTypes) { + chal = pickChallenge(challengeTypes[nextTyp], authz.Challenges) + nextTyp++ + } + if chal == nil { + return fmt.Errorf("acme/autocert: unable to authorize %q; tried %q", domain, challengeTypes) + } + cleanup, err := m.fulfill(ctx, client, chal) + if err != nil { + continue + } + defer cleanup() + if _, err := client.Accept(ctx, chal); err != nil { + continue + } + + // A challenge is fulfilled and accepted: wait for the CA to validate. + if _, err := client.WaitAuthorization(ctx, authz.URI); err == nil { + return nil } } - if chal == nil { - return errors.New("acme/autocert: no supported challenge type found") - } - - // create a token cert for the challenge response - var ( - cert tls.Certificate - name string - ) - switch chal.Type { - case "tls-sni-01": - cert, name, err = client.TLSSNI01ChallengeCert(chal.Token) - case "tls-sni-02": - cert, name, err = client.TLSSNI02ChallengeCert(chal.Token) - default: - err = fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type) - } - if err != nil { - return err - } - m.putTokenCert(ctx, name, &cert) - defer func() { - // verification has ended at this point - // don't need token cert anymore - go m.deleteTokenCert(name) - }() - - // ready to fulfill the challenge - if _, err := client.Accept(ctx, chal); err != nil { - return err - } - // wait for the CA to validate - _, err = client.WaitAuthorization(ctx, authz.URI) - return err } -// putTokenCert stores the cert under the named key in both m.tokenCert map -// and m.Cache. -func (m *Manager) putTokenCert(ctx context.Context, name string, cert *tls.Certificate) { - m.tokenCertMu.Lock() - defer m.tokenCertMu.Unlock() - if m.tokenCert == nil { - m.tokenCert = make(map[string]*tls.Certificate) +// fulfill provisions a response to the challenge chal. +// The cleanup is non-nil only if provisioning succeeded. +func (m *Manager) fulfill(ctx context.Context, client *acme.Client, chal *acme.Challenge) (cleanup func(), err error) { + switch chal.Type { + case "tls-sni-01": + cert, name, err := client.TLSSNI01ChallengeCert(chal.Token) + if err != nil { + return nil, err + } + m.putCertToken(ctx, name, &cert) + return func() { go m.deleteCertToken(name) }, nil + case "tls-sni-02": + cert, name, err := client.TLSSNI02ChallengeCert(chal.Token) + if err != nil { + return nil, err + } + m.putCertToken(ctx, name, &cert) + return func() { go m.deleteCertToken(name) }, nil + case "http-01": + resp, err := client.HTTP01ChallengeResponse(chal.Token) + if err != nil { + return nil, err + } + p := client.HTTP01ChallengePath(chal.Token) + m.putHTTPToken(ctx, p, resp) + return func() { go m.deleteHTTPToken(p) }, nil } - m.tokenCert[name] = cert + return nil, fmt.Errorf("acme/autocert: unknown challenge type %q", chal.Type) +} + +func pickChallenge(typ string, chal []*acme.Challenge) *acme.Challenge { + for _, c := range chal { + if c.Type == typ { + return c + } + } + return nil +} + +// putCertToken stores the cert under the named key in both m.certTokens map +// and m.Cache. 
+func (m *Manager) putCertToken(ctx context.Context, name string, cert *tls.Certificate) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + if m.certTokens == nil { + m.certTokens = make(map[string]*tls.Certificate) + } + m.certTokens[name] = cert m.cachePut(ctx, name, cert) } -// deleteTokenCert removes the token certificate for the specified domain name -// from both m.tokenCert map and m.Cache. -func (m *Manager) deleteTokenCert(name string) { - m.tokenCertMu.Lock() - defer m.tokenCertMu.Unlock() - delete(m.tokenCert, name) +// deleteCertToken removes the token certificate for the specified domain name +// from both m.certTokens map and m.Cache. +func (m *Manager) deleteCertToken(name string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + delete(m.certTokens, name) if m.Cache != nil { m.Cache.Delete(context.Background(), name) } } +// httpToken retrieves an existing http-01 token value from an in-memory map +// or the optional cache. +func (m *Manager) httpToken(ctx context.Context, tokenPath string) ([]byte, error) { + m.tokensMu.RLock() + defer m.tokensMu.RUnlock() + if v, ok := m.httpTokens[tokenPath]; ok { + return v, nil + } + if m.Cache == nil { + return nil, fmt.Errorf("acme/autocert: no token at %q", tokenPath) + } + return m.Cache.Get(ctx, httpTokenCacheKey(tokenPath)) +} + +// putHTTPToken stores an http-01 token value using tokenPath as key +// in both in-memory map and the optional Cache. +// +// It ignores any error returned from Cache.Put. +func (m *Manager) putHTTPToken(ctx context.Context, tokenPath, val string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + if m.httpTokens == nil { + m.httpTokens = make(map[string][]byte) + } + b := []byte(val) + m.httpTokens[tokenPath] = b + if m.Cache != nil { + m.Cache.Put(ctx, httpTokenCacheKey(tokenPath), b) + } +} + +// deleteHTTPToken removes an http-01 token value from both in-memory map +// and the optional Cache, ignoring any error returned from the latter. +// +// If m.Cache is non-nil, it blocks until Cache.Delete returns without a timeout. +func (m *Manager) deleteHTTPToken(tokenPath string) { + m.tokensMu.Lock() + defer m.tokensMu.Unlock() + delete(m.httpTokens, tokenPath) + if m.Cache != nil { + m.Cache.Delete(context.Background(), httpTokenCacheKey(tokenPath)) + } +} + +// httpTokenCacheKey returns a key at which an http-01 token value may be stored +// in the Manager's optional Cache. +func httpTokenCacheKey(tokenPath string) string { + return "http-01-" + path.Base(tokenPath) +} + // renew starts a cert renewal timer loop, one per domain. // // The loop is scheduled in two cases: diff --git a/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go b/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go index 43a6201..2da1912 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go +++ b/vendor/golang.org/x/crypto/acme/autocert/autocert_test.go @@ -23,6 +23,7 @@ import ( "net/http" "net/http/httptest" "reflect" + "strings" "sync" "testing" "time" @@ -48,6 +49,16 @@ var authzTmpl = template.Must(template.New("authz").Parse(`{ "uri": "{{.}}/challenge/2", "type": "tls-sni-02", "token": "token-02" + }, + { + "uri": "{{.}}/challenge/dns-01", + "type": "dns-01", + "token": "token-dns-01" + }, + { + "uri": "{{.}}/challenge/http-01", + "type": "http-01", + "token": "token-http-01" } ] }`)) @@ -419,6 +430,146 @@ func testGetCertificate(t *testing.T, man *Manager, domain string, hello *tls.Cl } +func TestVerifyHTTP01(t *testing.T) { + var ( + http01 http.Handler + + authzCount int // num. 
of created authorizations + didAcceptHTTP01 bool + ) + + verifyHTTPToken := func() { + r := httptest.NewRequest("GET", "/.well-known/acme-challenge/token-http-01", nil) + w := httptest.NewRecorder() + http01.ServeHTTP(w, r) + if w.Code != http.StatusOK { + t.Errorf("http token: w.Code = %d; want %d", w.Code, http.StatusOK) + } + if v := string(w.Body.Bytes()); !strings.HasPrefix(v, "token-http-01.") { + t.Errorf("http token value = %q; want 'token-http-01.' prefix", v) + } + } + + // ACME CA server stub, only the needed bits. + // TODO: Merge this with startACMEServerStub, making it a configurable CA for testing. + var ca *httptest.Server + ca = httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Replay-Nonce", "nonce") + if r.Method == "HEAD" { + // a nonce request + return + } + + switch r.URL.Path { + // Discovery. + case "/": + if err := discoTmpl.Execute(w, ca.URL); err != nil { + t.Errorf("discoTmpl: %v", err) + } + // Client key registration. + case "/new-reg": + w.Write([]byte("{}")) + // New domain authorization. + case "/new-authz": + authzCount++ + w.Header().Set("Location", fmt.Sprintf("%s/authz/%d", ca.URL, authzCount)) + w.WriteHeader(http.StatusCreated) + if err := authzTmpl.Execute(w, ca.URL); err != nil { + t.Errorf("authzTmpl: %v", err) + } + // Accept tls-sni-02. + case "/challenge/2": + w.Write([]byte("{}")) + // Reject tls-sni-01. + case "/challenge/1": + http.Error(w, "won't accept tls-sni-01", http.StatusBadRequest) + // Should not accept dns-01. + case "/challenge/dns-01": + t.Errorf("dns-01 challenge was accepted") + http.Error(w, "won't accept dns-01", http.StatusBadRequest) + // Accept http-01. + case "/challenge/http-01": + didAcceptHTTP01 = true + verifyHTTPToken() + w.Write([]byte("{}")) + // Authorization statuses. + // Make tls-sni-xxx invalid. + case "/authz/1", "/authz/2": + w.Write([]byte(`{"status": "invalid"}`)) + case "/authz/3", "/authz/4": + w.Write([]byte(`{"status": "valid"}`)) + default: + http.NotFound(w, r) + t.Errorf("unrecognized r.URL.Path: %s", r.URL.Path) + } + })) + defer ca.Close() + + key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + m := &Manager{ + Client: &acme.Client{ + Key: key, + DirectoryURL: ca.URL, + }, + } + http01 = m.HTTPHandler(nil) + if err := m.verify(context.Background(), m.Client, "example.org"); err != nil { + t.Errorf("m.verify: %v", err) + } + // Only tls-sni-01, tls-sni-02 and http-01 must be accepted + // The dns-01 challenge is unsupported. 
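+ // One authorization is created per attempted challenge type: tls-sni-02 fails
+ // validation, tls-sni-01 is rejected on accept, and http-01 succeeds.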
+ if authzCount != 3 { + t.Errorf("authzCount = %d; want 3", authzCount) + } + if !didAcceptHTTP01 { + t.Error("did not accept http-01 challenge") + } +} + +func TestHTTPHandlerDefaultFallback(t *testing.T) { + tt := []struct { + method, url string + wantCode int + wantLocation string + }{ + {"GET", "http://example.org", 302, "https://example.org/"}, + {"GET", "http://example.org/foo", 302, "https://example.org/foo"}, + {"GET", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"}, + {"GET", "http://example.org/?a=b", 302, "https://example.org/?a=b"}, + {"GET", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"}, + {"GET", "http://example.org:80/foo?a=b", 302, "https://example.org:443/foo?a=b"}, + {"GET", "http://example.org:80/foo%20bar", 302, "https://example.org:443/foo%20bar"}, + {"GET", "http://[2602:d1:xxxx::c60a]:1234", 302, "https://[2602:d1:xxxx::c60a]:443/"}, + {"GET", "http://[2602:d1:xxxx::c60a]", 302, "https://[2602:d1:xxxx::c60a]/"}, + {"GET", "http://[2602:d1:xxxx::c60a]/foo?a=b", 302, "https://[2602:d1:xxxx::c60a]/foo?a=b"}, + {"HEAD", "http://example.org", 302, "https://example.org/"}, + {"HEAD", "http://example.org/foo", 302, "https://example.org/foo"}, + {"HEAD", "http://example.org/foo/bar/", 302, "https://example.org/foo/bar/"}, + {"HEAD", "http://example.org/?a=b", 302, "https://example.org/?a=b"}, + {"HEAD", "http://example.org/foo?a=b", 302, "https://example.org/foo?a=b"}, + {"POST", "http://example.org", 400, ""}, + {"PUT", "http://example.org", 400, ""}, + {"GET", "http://example.org/.well-known/acme-challenge/x", 404, ""}, + } + var m Manager + h := m.HTTPHandler(nil) + for i, test := range tt { + r := httptest.NewRequest(test.method, test.url, nil) + w := httptest.NewRecorder() + h.ServeHTTP(w, r) + if w.Code != test.wantCode { + t.Errorf("%d: w.Code = %d; want %d", i, w.Code, test.wantCode) + t.Errorf("%d: body: %s", i, w.Body.Bytes()) + } + if v := w.Header().Get("Location"); v != test.wantLocation { + t.Errorf("%d: Location = %q; want %q", i, v, test.wantLocation) + } + } +} + func TestAccountKeyCache(t *testing.T) { m := Manager{Cache: newMemCache()} ctx := context.Background() diff --git a/vendor/golang.org/x/crypto/acme/autocert/example_test.go b/vendor/golang.org/x/crypto/acme/autocert/example_test.go index 71d61eb..552a625 100644 --- a/vendor/golang.org/x/crypto/acme/autocert/example_test.go +++ b/vendor/golang.org/x/crypto/acme/autocert/example_test.go @@ -22,11 +22,12 @@ func ExampleNewListener() { } func ExampleManager() { - m := autocert.Manager{ + m := &autocert.Manager{ Cache: autocert.DirCache("secret-dir"), Prompt: autocert.AcceptTOS, HostPolicy: autocert.HostWhitelist("example.org"), } + go http.ListenAndServe(":http", m.HTTPHandler(nil)) s := &http.Server{ Addr: ":https", TLSConfig: &tls.Config{GetCertificate: m.GetCertificate}, diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go index 893e272..e3c01d7 100644 --- a/vendor/golang.org/x/net/html/token.go +++ b/vendor/golang.org/x/net/html/token.go @@ -1161,8 +1161,8 @@ func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) { return nil, nil, false } -// Token returns the next Token. The result's Data and Attr values remain valid -// after subsequent Next calls. +// Token returns the current Token. The result's Data and Attr values remain +// valid after subsequent Next calls. 
func (z *Tokenizer) Token() Token { t := Token{Type: z.tt} switch z.tt { diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go index b65fc6d..088d6e2 100644 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ b/vendor/golang.org/x/net/http2/configure_transport.go @@ -73,7 +73,7 @@ type noDialH2RoundTripper struct{ t *Transport } func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { res, err := rt.t.RoundTrip(req) - if err == ErrNoCachedConn { + if isNoCachedConnError(err) { return nil, http.ErrSkipAltProtocol } return res, err diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 7a50226..460ede0 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -2509,7 +2509,6 @@ func checkWriteHeaderCode(code int) { } func (w *responseWriter) WriteHeader(code int) { - checkWriteHeaderCode(code) rws := w.rws if rws == nil { panic("WriteHeader called after Handler finished") @@ -2519,6 +2518,7 @@ func (w *responseWriter) WriteHeader(code int) { func (rws *responseWriterState) writeHeader(code int) { if !rws.wroteHeader { + checkWriteHeaderCode(code) rws.wroteHeader = true rws.status = code if len(rws.handlerHeader) > 0 { diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index c65f1a3..e6b321f 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -306,7 +306,26 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { return } -var ErrNoCachedConn = errors.New("http2: no cached connection was available") +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, as it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} // RoundTripOpt are options for the Transport.RoundTripOpt method. type RoundTripOpt struct { diff --git a/vendor/golang.org/x/sys/unix/affinity_linux.go b/vendor/golang.org/x/sys/unix/affinity_linux.go new file mode 100644 index 0000000..d81fbb5 --- /dev/null +++ b/vendor/golang.org/x/sys/unix/affinity_linux.go @@ -0,0 +1,124 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// CPU affinity functions + +package unix + +import ( + "unsafe" +) + +const cpuSetSize = _CPU_SETSIZE / _NCPUBITS + +// CPUSet represents a CPU affinity mask. 
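+// Each bit in the mask corresponds to one logical CPU; the array is sized to
+// cover _CPU_SETSIZE CPUs using _NCPUBITS bits per cpuMask word.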
+type CPUSet [cpuSetSize]cpuMask + +func schedAffinity(trap uintptr, pid int, set *CPUSet) error { + _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(set)), uintptr(unsafe.Pointer(set))) + if e != 0 { + return errnoErr(e) + } + return nil +} + +// SchedGetaffinity gets the CPU affinity mask of the thread specified by pid. +// If pid is 0 the calling thread is used. +func SchedGetaffinity(pid int, set *CPUSet) error { + return schedAffinity(SYS_SCHED_GETAFFINITY, pid, set) +} + +// SchedSetaffinity sets the CPU affinity mask of the thread specified by pid. +// If pid is 0 the calling thread is used. +func SchedSetaffinity(pid int, set *CPUSet) error { + return schedAffinity(SYS_SCHED_SETAFFINITY, pid, set) +} + +// Zero clears the set s, so that it contains no CPUs. +func (s *CPUSet) Zero() { + for i := range s { + s[i] = 0 + } +} + +func cpuBitsIndex(cpu int) int { + return cpu / _NCPUBITS +} + +func cpuBitsMask(cpu int) cpuMask { + return cpuMask(1 << (uint(cpu) % _NCPUBITS)) +} + +// Set adds cpu to the set s. +func (s *CPUSet) Set(cpu int) { + i := cpuBitsIndex(cpu) + if i < len(s) { + s[i] |= cpuBitsMask(cpu) + } +} + +// Clear removes cpu from the set s. +func (s *CPUSet) Clear(cpu int) { + i := cpuBitsIndex(cpu) + if i < len(s) { + s[i] &^= cpuBitsMask(cpu) + } +} + +// IsSet reports whether cpu is in the set s. +func (s *CPUSet) IsSet(cpu int) bool { + i := cpuBitsIndex(cpu) + if i < len(s) { + return s[i]&cpuBitsMask(cpu) != 0 + } + return false +} + +// Count returns the number of CPUs in the set s. +func (s *CPUSet) Count() int { + c := 0 + for _, b := range s { + c += onesCount64(uint64(b)) + } + return c +} + +// onesCount64 is a copy of Go 1.9's math/bits.OnesCount64. +// Once this package can require Go 1.9, we can delete this +// and update the caller to use bits.OnesCount64. +func onesCount64(x uint64) int { + const m0 = 0x5555555555555555 // 01010101 ... + const m1 = 0x3333333333333333 // 00110011 ... + const m2 = 0x0f0f0f0f0f0f0f0f // 00001111 ... + const m3 = 0x00ff00ff00ff00ff // etc. + const m4 = 0x0000ffff0000ffff + + // Implementation: Parallel summing of adjacent bits. + // See "Hacker's Delight", Chap. 5: Counting Bits. + // The following pattern shows the general approach: + // + // x = x>>1&(m0&m) + x&(m0&m) + // x = x>>2&(m1&m) + x&(m1&m) + // x = x>>4&(m2&m) + x&(m2&m) + // x = x>>8&(m3&m) + x&(m3&m) + // x = x>>16&(m4&m) + x&(m4&m) + // x = x>>32&(m5&m) + x&(m5&m) + // return int(x) + // + // Masking (& operations) can be left away when there's no + // danger that a field's sum will carry over into the next + // field: Since the result cannot be > 64, 8 bits is enough + // and we can ignore the masks for the shifts by 8 and up. + // Per "Hacker's Delight", the first line can be simplified + // more, but it saves at best one instruction, so we leave + // it alone for clarity. 
+ const m = 1<<64 - 1 + x = x>>1&(m0&m) + x&(m0&m) + x = x>>2&(m1&m) + x&(m1&m) + x = (x>>4 + x) & (m2 & m) + x += x >> 8 + x += x >> 16 + x += x >> 32 + return int(x) & (1<<7 - 1) +} diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm.s b/vendor/golang.org/x/sys/unix/asm_linux_arm.s index b90a883..cf0f357 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm.s @@ -35,7 +35,6 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-24 BL runtime·exitsyscall(SB) RET - TEXT ·RawSyscall(SB),NOSPLIT,$0-28 B syscall·RawSyscall(SB) @@ -53,5 +52,5 @@ TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24 MOVW R0, r2+20(FP) RET -TEXT ·seek(SB),NOSPLIT,$0-32 +TEXT ·seek(SB),NOSPLIT,$0-28 B syscall·seek(SB) diff --git a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s index d84fd46..afe6fdf 100644 --- a/vendor/golang.org/x/sys/unix/asm_linux_arm64.s +++ b/vendor/golang.org/x/sys/unix/asm_linux_arm64.s @@ -17,7 +17,7 @@ TEXT ·Syscall(SB),NOSPLIT,$0-56 TEXT ·Syscall6(SB),NOSPLIT,$0-80 B syscall·Syscall6(SB) -TEXT ·Syscall(SB),NOSPLIT,$0-48 +TEXT ·SyscallNoError(SB),NOSPLIT,$0-48 BL runtime·entersyscall(SB) MOVD a1+8(FP), R0 MOVD a2+16(FP), R1 diff --git a/vendor/golang.org/x/sys/unix/env_unix.go b/vendor/golang.org/x/sys/unix/env_unix.go index 2e06b33..706b3cd 100644 --- a/vendor/golang.org/x/sys/unix/env_unix.go +++ b/vendor/golang.org/x/sys/unix/env_unix.go @@ -25,3 +25,7 @@ func Clearenv() { func Environ() []string { return syscall.Environ() } + +func Unsetenv(key string) error { + return syscall.Unsetenv(key) +} diff --git a/vendor/golang.org/x/sys/unix/env_unset.go b/vendor/golang.org/x/sys/unix/env_unset.go deleted file mode 100644 index c44fdc4..0000000 --- a/vendor/golang.org/x/sys/unix/env_unset.go +++ /dev/null @@ -1,14 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.4 - -package unix - -import "syscall" - -func Unsetenv(key string) error { - // This was added in Go 1.4. - return syscall.Unsetenv(key) -} diff --git a/vendor/golang.org/x/sys/unix/gccgo.go b/vendor/golang.org/x/sys/unix/gccgo.go index 40bed3f..50062e3 100644 --- a/vendor/golang.org/x/sys/unix/gccgo.go +++ b/vendor/golang.org/x/sys/unix/gccgo.go @@ -11,9 +11,19 @@ import "syscall" // We can't use the gc-syntax .s files for gccgo. On the plus side // much of the functionality can be written directly in Go. 
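+// The NoError variants below mirror Syscall and RawSyscall but skip errno
+// handling; they back wrappers for syscalls that cannot fail, such as Sync.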
+//extern gccgoRealSyscallNoError +func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr) + //extern gccgoRealSyscall func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr) +func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) { + syscall.Entersyscall() + r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) + syscall.Exitsyscall() + return r, 0 +} + func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { syscall.Entersyscall() r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) @@ -35,6 +45,11 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, return r, 0, syscall.Errno(errno) } +func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) { + r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) + return r, 0 +} + func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) return r, 0, syscall.Errno(errno) diff --git a/vendor/golang.org/x/sys/unix/gccgo_c.c b/vendor/golang.org/x/sys/unix/gccgo_c.c index 99a774f..24e96b1 100644 --- a/vendor/golang.org/x/sys/unix/gccgo_c.c +++ b/vendor/golang.org/x/sys/unix/gccgo_c.c @@ -31,6 +31,12 @@ gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintp return r; } +uintptr_t +gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9) +{ + return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9); +} + // Define the use function in C so that it is not inlined. extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline)); diff --git a/vendor/golang.org/x/sys/unix/mkerrors.sh b/vendor/golang.org/x/sys/unix/mkerrors.sh index 2a44da5..4dd40c1 100755 --- a/vendor/golang.org/x/sys/unix/mkerrors.sh +++ b/vendor/golang.org/x/sys/unix/mkerrors.sh @@ -187,6 +187,7 @@ struct ltchars { #include #include #include +#include #include #include #include @@ -426,7 +427,9 @@ ccflags="$@" $2 ~ /^(VM|VMADDR)_/ || $2 ~ /^IOCTL_VM_SOCKETS_/ || $2 ~ /^(TASKSTATS|TS)_/ || + $2 ~ /^CGROUPSTATS_/ || $2 ~ /^GENL_/ || + $2 ~ /^STATX_/ || $2 ~ /^UTIME_/ || $2 ~ /^XATTR_(CREATE|REPLACE)/ || $2 ~ /^ATTR_(BIT_MAP_COUNT|(CMN|VOL|FILE)_)/ || diff --git a/vendor/golang.org/x/sys/unix/mkpost.go b/vendor/golang.org/x/sys/unix/mkpost.go index dbdfd0a..4c191f0 100644 --- a/vendor/golang.org/x/sys/unix/mkpost.go +++ b/vendor/golang.org/x/sys/unix/mkpost.go @@ -61,6 +61,10 @@ func main() { convertUtsnameRegex := regexp.MustCompile(`((Sys|Node|Domain)name|Release|Version|Machine)(\s+)\[(\d+)\]u?int8`) b = convertUtsnameRegex.ReplaceAll(b, []byte("$1$3[$4]byte")) + // Remove spare fields (e.g. 
in Statx_t) + spareFieldsRegex := regexp.MustCompile(`X__spare\S*`) + b = spareFieldsRegex.ReplaceAll(b, []byte("_")) + // We refuse to export private fields on s390x if goarch == "s390x" && goos == "linux" { // Remove cgo padding fields diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go index f77f44a..71c58e1 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux.go @@ -1318,6 +1318,7 @@ func Setgid(uid int) (err error) { //sys Setpriority(which int, who int, prio int) (err error) //sys Setxattr(path string, attr string, data []byte, flags int) (err error) +//sys Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) //sys Sync() //sys Syncfs(fd int) (err error) //sysnb Sysinfo(info *Sysinfo_t) (err error) @@ -1455,11 +1456,9 @@ func Vmsplice(fd int, iovs []Iovec, flags int) (int, error) { // RtSigtimedwait // SchedGetPriorityMax // SchedGetPriorityMin -// SchedGetaffinity // SchedGetparam // SchedGetscheduler // SchedRrGetInterval -// SchedSetaffinity // SchedSetparam // SchedYield // Security diff --git a/vendor/golang.org/x/sys/unix/syscall_linux_test.go b/vendor/golang.org/x/sys/unix/syscall_linux_test.go index ea9562f..78d2879 100644 --- a/vendor/golang.org/x/sys/unix/syscall_linux_test.go +++ b/vendor/golang.org/x/sys/unix/syscall_linux_test.go @@ -9,6 +9,7 @@ package unix_test import ( "io/ioutil" "os" + "runtime" "testing" "time" @@ -19,9 +20,12 @@ func TestFchmodat(t *testing.T) { defer chtmpdir(t)() touch(t, "file1") - os.Symlink("file1", "symlink1") + err := os.Symlink("file1", "symlink1") + if err != nil { + t.Fatal(err) + } - err := unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, 0) + err = unix.Fchmodat(unix.AT_FDCWD, "symlink1", 0444, 0) if err != nil { t.Fatalf("Fchmodat: unexpected error: %v", err) } @@ -238,7 +242,10 @@ func TestFstatat(t *testing.T) { t.Errorf("Fstatat: returned stat does not match Stat") } - os.Symlink("file1", "symlink1") + err = os.Symlink("file1", "symlink1") + if err != nil { + t.Fatal(err) + } err = unix.Lstat("symlink1", &st1) if err != nil { @@ -255,6 +262,148 @@ func TestFstatat(t *testing.T) { } } +func TestSchedSetaffinity(t *testing.T) { + runtime.LockOSThread() + defer runtime.UnlockOSThread() + + var oldMask unix.CPUSet + err := unix.SchedGetaffinity(0, &oldMask) + if err != nil { + t.Fatalf("SchedGetaffinity: %v", err) + } + + var newMask unix.CPUSet + newMask.Zero() + if newMask.Count() != 0 { + t.Errorf("CpuZero: didn't zero CPU set: %v", newMask) + } + cpu := 1 + newMask.Set(cpu) + if newMask.Count() != 1 || !newMask.IsSet(cpu) { + t.Errorf("CpuSet: didn't set CPU %d in set: %v", cpu, newMask) + } + cpu = 5 + newMask.Set(cpu) + if newMask.Count() != 2 || !newMask.IsSet(cpu) { + t.Errorf("CpuSet: didn't set CPU %d in set: %v", cpu, newMask) + } + newMask.Clear(cpu) + if newMask.Count() != 1 || newMask.IsSet(cpu) { + t.Errorf("CpuClr: didn't clear CPU %d in set: %v", cpu, newMask) + } + + err = unix.SchedSetaffinity(0, &newMask) + if err != nil { + t.Fatalf("SchedSetaffinity: %v", err) + } + + var gotMask unix.CPUSet + err = unix.SchedGetaffinity(0, &gotMask) + if err != nil { + t.Fatalf("SchedGetaffinity: %v", err) + } + + if gotMask != newMask { + t.Errorf("SchedSetaffinity: returned affinity mask does not match set affinity mask") + } + + // Restore old mask so it doesn't affect successive tests + err = unix.SchedSetaffinity(0, &oldMask) + if err != nil { + t.Fatalf("SchedSetaffinity: %v", err) + } +} + 
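As a reading aid for this vendor bump (not part of the diff itself): the new CPU-affinity API from x/sys/unix shown above can be used roughly as follows. This is a minimal illustrative sketch assuming Linux and this revision of golang.org/x/sys/unix; error handling is reduced to panics for brevity.

package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sys/unix"
)

func main() {
	// Affinity is a per-thread property, so keep this goroutine on one OS thread.
	runtime.LockOSThread()
	defer runtime.UnlockOSThread()

	// Save the current mask of the calling thread (pid 0 means "this thread").
	var old unix.CPUSet
	if err := unix.SchedGetaffinity(0, &old); err != nil {
		panic(err)
	}

	// Restrict the thread to CPU 0 only.
	var mask unix.CPUSet
	mask.Zero()
	mask.Set(0)
	if err := unix.SchedSetaffinity(0, &mask); err != nil {
		panic(err)
	}
	fmt.Println("CPUs in new mask:", mask.Count()) // prints 1

	// Restore the original mask so the rest of the program is unaffected.
	if err := unix.SchedSetaffinity(0, &old); err != nil {
		panic(err)
	}
}

The Zero, Set, Clear, IsSet and Count helpers used here are the same ones exercised by TestSchedSetaffinity above.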
+func TestStatx(t *testing.T) { + var stx unix.Statx_t + err := unix.Statx(unix.AT_FDCWD, ".", 0, 0, &stx) + if err == unix.ENOSYS { + t.Skip("statx syscall is not available, skipping test") + } else if err != nil { + t.Fatalf("Statx: %v", err) + } + + defer chtmpdir(t)() + touch(t, "file1") + + var st unix.Stat_t + err = unix.Stat("file1", &st) + if err != nil { + t.Fatalf("Stat: %v", err) + } + + flags := unix.AT_STATX_SYNC_AS_STAT + err = unix.Statx(unix.AT_FDCWD, "file1", flags, unix.STATX_ALL, &stx) + if err != nil { + t.Fatalf("Statx: %v", err) + } + + if uint32(stx.Mode) != st.Mode { + t.Errorf("Statx: returned stat mode does not match Stat") + } + + atime := unix.StatxTimestamp{Sec: int64(st.Atim.Sec), Nsec: uint32(st.Atim.Nsec)} + ctime := unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)} + mtime := unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)} + + if stx.Atime != atime { + t.Errorf("Statx: returned stat atime does not match Stat") + } + if stx.Ctime != ctime { + t.Errorf("Statx: returned stat ctime does not match Stat") + } + if stx.Mtime != mtime { + t.Errorf("Statx: returned stat mtime does not match Stat") + } + + err = os.Symlink("file1", "symlink1") + if err != nil { + t.Fatal(err) + } + + err = unix.Lstat("symlink1", &st) + if err != nil { + t.Fatalf("Lstat: %v", err) + } + + err = unix.Statx(unix.AT_FDCWD, "symlink1", flags, unix.STATX_BASIC_STATS, &stx) + if err != nil { + t.Fatalf("Statx: %v", err) + } + + // follow symlink, expect a regulat file + if stx.Mode&unix.S_IFREG == 0 { + t.Errorf("Statx: didn't follow symlink") + } + + err = unix.Statx(unix.AT_FDCWD, "symlink1", flags|unix.AT_SYMLINK_NOFOLLOW, unix.STATX_ALL, &stx) + if err != nil { + t.Fatalf("Statx: %v", err) + } + + // follow symlink, expect a symlink + if stx.Mode&unix.S_IFLNK == 0 { + t.Errorf("Statx: unexpectedly followed symlink") + } + if uint32(stx.Mode) != st.Mode { + t.Errorf("Statx: returned stat mode does not match Lstat") + } + + atime = unix.StatxTimestamp{Sec: int64(st.Atim.Sec), Nsec: uint32(st.Atim.Nsec)} + ctime = unix.StatxTimestamp{Sec: int64(st.Ctim.Sec), Nsec: uint32(st.Ctim.Nsec)} + mtime = unix.StatxTimestamp{Sec: int64(st.Mtim.Sec), Nsec: uint32(st.Mtim.Nsec)} + + if stx.Atime != atime { + t.Errorf("Statx: returned stat atime does not match Lstat") + } + if stx.Ctime != ctime { + t.Errorf("Statx: returned stat ctime does not match Lstat") + } + if stx.Mtime != mtime { + t.Errorf("Statx: returned stat mtime does not match Lstat") + } +} + // utilities taken from os/os_test.go func touch(t *testing.T, name string) { diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go index 8947248..4fba476 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go @@ -1638,6 +1638,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git 
a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go index 4083cb2..7e2a108 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go @@ -1639,6 +1639,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go index 27d3835..250841b 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go @@ -1643,6 +1643,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go index 69ad314..f5d7856 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go @@ -1629,6 +1629,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go index d131a4c..f45492d 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go @@ -1641,6 +1641,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED 
= 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go index 62dd203..f5a64fb 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go @@ -1641,6 +1641,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go index dc8e56e..db6d556 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go @@ -1641,6 +1641,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go index 9067662..4a62a55 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go @@ -1641,6 +1641,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go index f6ca82c..5e1e81e 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go @@ -1696,6 +1696,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + 
STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go index ddd2562..6a80324 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go @@ -1696,6 +1696,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go index fc304a6..af5a895 100644 --- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go @@ -1700,6 +1700,27 @@ const ( SPLICE_F_MORE = 0x4 SPLICE_F_MOVE = 0x1 SPLICE_F_NONBLOCK = 0x2 + STATX_ALL = 0xfff + STATX_ATIME = 0x20 + STATX_ATTR_APPEND = 0x20 + STATX_ATTR_AUTOMOUNT = 0x1000 + STATX_ATTR_COMPRESSED = 0x4 + STATX_ATTR_ENCRYPTED = 0x800 + STATX_ATTR_IMMUTABLE = 0x10 + STATX_ATTR_NODUMP = 0x40 + STATX_BASIC_STATS = 0x7ff + STATX_BLOCKS = 0x400 + STATX_BTIME = 0x800 + STATX_CTIME = 0x80 + STATX_GID = 0x10 + STATX_INO = 0x100 + STATX_MODE = 0x2 + STATX_MTIME = 0x40 + STATX_NLINK = 0x4 + STATX_SIZE = 0x200 + STATX_TYPE = 0x1 + STATX_UID = 0x8 + STATX__RESERVED = 0x80000000 S_BLKSIZE = 0x200 S_IEXEC = 0x40 S_IFBLK = 0x6000 diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go index dcb9547..ef9602c 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_386.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go index badf57e..63054b3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_amd64.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := 
Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go index 69765c3..8b10ee1 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go index dc8e642..8f276d6 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_arm64.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go index 59f50c8..61169b3 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go index 3803380..4cb59b4 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, 
flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go index fee2f85..0b547ae 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mips64le.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go index 4094d3d..cd94d3a 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_mipsle.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go index d83bafb..cdad555 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go index 460971c..38f4e44 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_ppc64le.go @@ -1238,6 +1238,21 @@ 
func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go index b7ef121..c443baf 100644 --- a/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/zsyscall_linux_s390x.go @@ -1238,6 +1238,21 @@ func Setxattr(path string, attr string, data []byte, flags int) (err error) { // THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT +func Statx(dirfd int, path string, flags int, mask int, stat *Statx_t) (err error) { + var _p0 *byte + _p0, err = BytePtrFromString(path) + if err != nil { + return + } + _, _, e1 := Syscall6(SYS_STATX, uintptr(dirfd), uintptr(unsafe.Pointer(_p0)), uintptr(flags), uintptr(mask), uintptr(unsafe.Pointer(stat)), 0) + if e1 != 0 { + err = errnoErr(e1) + } + return +} + +// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT + func Sync() { SyscallNoError(SYS_SYNC, 0, 0, 0) return diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go index 295a9bd..7cf0a74 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_386.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_386.go @@ -131,6 +131,36 @@ type Statfs_t struct { Spare [4]int32 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -643,9 +673,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -760,6 +796,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -792,3 +846,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go index 
cdd4a1d..1671205 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_amd64.go @@ -131,6 +131,36 @@ type Statfs_t struct { Spare [4]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -661,9 +691,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -778,6 +814,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -810,3 +864,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go index 086620a..cc0c3de 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm.go @@ -133,6 +133,36 @@ type Statfs_t struct { Pad_cgo_0 [4]byte } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -632,9 +662,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -749,6 +785,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -781,3 +835,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask 
uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go index c53db98..d82cca9 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_arm64.go @@ -132,6 +132,36 @@ type Statfs_t struct { Spare [4]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -640,9 +670,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -757,6 +793,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -789,3 +843,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go index 69e529b..f9a7fae 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips.go @@ -132,6 +132,36 @@ type Statfs_t struct { Pad_cgo_1 [4]byte } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -637,9 +667,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -754,6 +790,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + 
CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -786,3 +840,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go index 4460279..5b6c48f 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64.go @@ -132,6 +132,36 @@ type Statfs_t struct { Spare [5]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -642,9 +672,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -759,6 +795,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -791,3 +845,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go index 9892381..23af0f5 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mips64le.go @@ -132,6 +132,36 @@ type Statfs_t struct { Spare [5]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -642,9 +672,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -759,6 +795,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 
+} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -791,3 +845,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go index c2f4c03..32f2a86 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_mipsle.go @@ -132,6 +132,36 @@ type Statfs_t struct { Pad_cgo_1 [4]byte } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -637,9 +667,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -754,6 +790,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -786,3 +840,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint32 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x20 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go index ffe7883..51f6778 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64.go @@ -133,6 +133,36 @@ type Statfs_t struct { Spare [4]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -650,9 +680,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 
) @@ -767,6 +803,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -799,3 +853,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go index 7e3d946..59497fe 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_ppc64le.go @@ -133,6 +133,36 @@ type Statfs_t struct { Spare [4]int64 } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + X__reserved int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -650,9 +680,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -767,6 +803,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -799,3 +853,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go index 730fa8a..c064b3b 100644 --- a/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go +++ b/vendor/golang.org/x/sys/unix/ztypes_linux_s390x.go @@ -132,6 +132,36 @@ type Statfs_t struct { _ [4]byte } +type StatxTimestamp struct { + Sec int64 + Nsec uint32 + _ int32 +} + +type Statx_t struct { + Mask uint32 + Blksize uint32 + Attributes uint64 + Nlink uint32 + Uid uint32 + Gid uint32 + Mode uint16 + _ [1]uint16 + Ino uint64 + Size uint64 + Blocks uint64 + Attributes_mask uint64 + Atime StatxTimestamp + Btime StatxTimestamp + Ctime StatxTimestamp + Mtime StatxTimestamp + Rdev_major uint32 + Rdev_minor uint32 + Dev_major uint32 + Dev_minor uint32 + _ [14]uint64 +} + type Dirent struct { Ino uint64 Off int64 @@ -667,9 +697,15 @@ type EpollEvent struct { } const ( - AT_FDCWD = -0x64 - AT_NO_AUTOMOUNT = 0x800 - AT_REMOVEDIR = 0x200 + AT_EMPTY_PATH = 0x1000 + AT_FDCWD = -0x64 + 
AT_NO_AUTOMOUNT = 0x800 + AT_REMOVEDIR = 0x200 + + AT_STATX_SYNC_AS_STAT = 0x0 + AT_STATX_FORCE_SYNC = 0x2000 + AT_STATX_DONT_SYNC = 0x4000 + AT_SYMLINK_FOLLOW = 0x400 AT_SYMLINK_NOFOLLOW = 0x100 ) @@ -784,6 +820,24 @@ const ( TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK = 0x4 ) +type CGroupStats struct { + Sleeping uint64 + Running uint64 + Stopped uint64 + Uninterruptible uint64 + Io_wait uint64 +} + +const ( + CGROUPSTATS_CMD_UNSPEC = 0x3 + CGROUPSTATS_CMD_GET = 0x4 + CGROUPSTATS_CMD_NEW = 0x5 + CGROUPSTATS_TYPE_UNSPEC = 0x0 + CGROUPSTATS_TYPE_CGROUP_STATS = 0x1 + CGROUPSTATS_CMD_ATTR_UNSPEC = 0x0 + CGROUPSTATS_CMD_ATTR_FD = 0x1 +) + type Genlmsghdr struct { Cmd uint8 Version uint8 @@ -816,3 +870,10 @@ const ( CTRL_ATTR_MCAST_GRP_NAME = 0x1 CTRL_ATTR_MCAST_GRP_ID = 0x2 ) + +type cpuMask uint64 + +const ( + _CPU_SETSIZE = 0x400 + _NCPUBITS = 0x40 +) diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index 1d079eb..bfbef36 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -1119,8 +1119,8 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, } done := make(chan struct{}) onPrefaceReceipt := func() { - close(done) ac.mu.Lock() + close(done) if !ac.backoffDeadline.IsZero() { // If we haven't already started reconnecting to // other backends. @@ -1185,10 +1185,16 @@ func (ac *addrConn) createTransport(connectRetryNum, ridx int, backoffDeadline, close(ac.ready) ac.ready = nil } - ac.connectRetryNum = connectRetryNum - ac.backoffDeadline = backoffDeadline - ac.connectDeadline = connectDeadline - ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list. + select { + case <-done: + // If the server has responded back with preface already, + // don't set the reconnect parameters. + default: + ac.connectRetryNum = connectRetryNum + ac.backoffDeadline = backoffDeadline + ac.connectDeadline = connectDeadline + ac.reconnectIdx = i + 1 // Start reconnecting from the next backend in the list. + } ac.mu.Unlock() return true, nil } diff --git a/vendor/google.golang.org/grpc/rpc_util.go b/vendor/google.golang.org/grpc/rpc_util.go index 6a5a7de..bf384b6 100644 --- a/vendor/google.golang.org/grpc/rpc_util.go +++ b/vendor/google.golang.org/grpc/rpc_util.go @@ -500,6 +500,6 @@ const ( ) // Version is the current grpc version. -const Version = "1.9.0" +const Version = "1.9.1" const grpcUA = "grpc-go/" + Version
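
Note: the x/sys/unix bump vendored above adds a generated Statx wrapper, the Statx_t/StatxTimestamp types, and the AT_STATX_* flag constants for each Linux architecture. The following is a minimal, illustrative usage sketch only (not part of the patch); it assumes a Linux build target, and the literal mask 0x7ff stands in for STATX_BASIC_STATS in case that named constant is not exported by this vendored revision.

// statx_example.go — illustrative sketch, Linux only.
package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	var stx unix.Statx_t
	// 0x7ff corresponds to STATX_BASIC_STATS in linux/stat.h; hard-coded here
	// in case the named constant is not present in this vendored revision.
	const mask = 0x7ff
	// Resolve the path relative to the current directory and do not follow symlinks.
	flags := unix.AT_STATX_SYNC_AS_STAT | unix.AT_SYMLINK_NOFOLLOW
	if err := unix.Statx(unix.AT_FDCWD, "/etc/hosts", flags, mask, &stx); err != nil {
		log.Fatalf("statx /etc/hosts: %v", err)
	}
	fmt.Printf("size=%d mode=%#o mtime=%d.%09d\n", stx.Size, stx.Mode, stx.Mtime.Sec, stx.Mtime.Nsec)
}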