update deps, vendor mage

This commit is contained in:
Cadey Ratio 2018-01-16 20:32:31 -08:00
parent 752d742f89
commit 45cecbc145
193 changed files with 11393 additions and 2391 deletions

57
Gopkg.lock generated
View File

@ -1,5 +1,4 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
memo = "a11e1692755a705514dbd401ba4795821d1ac221d6f9100124c38a29db98c568"
[[projects]]
branch = "master"
@ -76,14 +75,14 @@
[[projects]]
name = "github.com/asdine/storm"
packages = [".","codec","codec/json","index","internal","q"]
revision = "dbd37722730b6cb703b5bd825c3f142d87358525"
version = "v2.0.0"
revision = "68fc73b635f890fe7ba2f3b15ce80c85b28a744f"
version = "v2.0.2"
[[projects]]
name = "github.com/aws/aws-sdk-go"
packages = ["aws","aws/awserr","aws/awsutil","aws/client","aws/client/metadata","aws/corehandlers","aws/credentials","aws/credentials/ec2rolecreds","aws/credentials/endpointcreds","aws/credentials/stscreds","aws/defaults","aws/ec2metadata","aws/endpoints","aws/request","aws/session","aws/signer/v4","internal/shareddefaults","private/protocol","private/protocol/query","private/protocol/query/queryutil","private/protocol/rest","private/protocol/restxml","private/protocol/xml/xmlutil","service/s3","service/sts"]
revision = "f62f7b7c5425f2b1a630932617477bdeac6dc371"
version = "v1.12.55"
revision = "fe3adbda9bc845e750e3e5767c0a14dff202b2cc"
version = "v1.12.62"
[[projects]]
branch = "master"
@ -101,7 +100,7 @@
branch = "master"
name = "github.com/bifurcation/mint"
packages = [".","syntax"]
revision = "f699e8d03646cb8e6e15410ced7bff37fcf8dddd"
revision = "350f685c15fb6b89af795dafe64fad68950948e0"
[[projects]]
name = "github.com/blang/semver"
@ -185,7 +184,7 @@
branch = "master"
name = "github.com/hashicorp/go-getter"
packages = [".","helper/url"]
revision = "994f50a6f071b07cfbea9eca9618c9674091ca51"
revision = "961f56d2e93379b7d9c578e998d09257509a6f97"
[[projects]]
branch = "master"
@ -203,7 +202,7 @@
branch = "master"
name = "github.com/hashicorp/go-plugin"
packages = ["."]
revision = "e2fbc6864d18d3c37b6cde4297ec9fca266d28f1"
revision = "e37881a3f1a07fce82b3d99ce0342a72e53386bc"
[[projects]]
branch = "master"
@ -233,7 +232,7 @@
branch = "master"
name = "github.com/hashicorp/hcl2"
packages = ["gohcl","hcl","hcl/hclsyntax","hcl/json","hcldec","hclparse"]
revision = "44bad6dbf5490f5da17ec991e664df3d017b706f"
revision = "883a81b4902ecdc60cd9d77eae4c228792827c13"
[[projects]]
branch = "master"
@ -243,9 +242,9 @@
[[projects]]
name = "github.com/hashicorp/terraform"
packages = ["config","config/configschema","config/hcl2shim","config/module","dag","flatmap","helper/hashcode","helper/hilmapstructure","helper/schema","moduledeps","plugin","plugin/discovery","registry/regsrc","registry/response","svchost","svchost/auth","svchost/disco","terraform","tfdiags","version"]
revision = "a42fdb08a43c7fabb8898fe8c286b793bbaa4835"
version = "v0.11.1"
packages = ["config","config/configschema","config/hcl2shim","config/module","dag","flatmap","helper/hashcode","helper/hilmapstructure","helper/schema","moduledeps","plugin","plugin/discovery","registry","registry/regsrc","registry/response","svchost","svchost/auth","svchost/disco","terraform","tfdiags","version"]
revision = "a6008b8a48a749c7c167453b9cf55ffd572b9a5d"
version = "v0.11.2"
[[projects]]
branch = "master"
@ -256,7 +255,8 @@
[[projects]]
name = "github.com/jmespath/go-jmespath"
packages = ["."]
revision = "0b12d6b5"
revision = "3433f3ea46d9f8019119e7dd41274e112a2359a9"
version = "0.2.2"
[[projects]]
name = "github.com/joho/godotenv"
@ -326,7 +326,7 @@
[[projects]]
name = "github.com/magefile/mage"
packages = ["mg","types"]
packages = ["build","mage","mg","parse","parse/srcimporter","sh","types"]
revision = "ab3ca2f6f85577d7ec82e0a6df721147a2e737f9"
version = "v2.0.1"
@ -382,7 +382,7 @@
branch = "master"
name = "github.com/mitchellh/mapstructure"
packages = ["."]
revision = "06020f85339e21b2478f756a78e295255ffa4d6a"
revision = "b4575eea38cca1123ec2dc90c26529b5c5acfcff"
[[projects]]
branch = "master"
@ -406,7 +406,7 @@
branch = "master"
name = "github.com/olekukonko/tablewriter"
packages = ["."]
revision = "65fec0d89a572b4367094e2058d3ebe667de3b60"
revision = "96aac992fc8b1a4c83841a6c3e7178d20d989625"
[[projects]]
name = "github.com/pkg/errors"
@ -423,8 +423,8 @@
[[projects]]
name = "github.com/satori/go.uuid"
packages = ["."]
revision = "879c5887cd475cd7864858769793b2ceb0d44feb"
version = "v1.1.0"
revision = "f58768cc1a7a7e77a3bd49e98cdd21419399b6a3"
version = "v1.2.0"
[[projects]]
branch = "master"
@ -472,7 +472,7 @@
branch = "master"
name = "github.com/zclconf/go-cty"
packages = ["cty","cty/convert","cty/function","cty/function/stdlib","cty/gocty","cty/json","cty/set"]
revision = "48ce95f3a00f37ac934ff90a62e377146f9428e1"
revision = "709e4033eeb037dc543dbc2048065dfb814ce316"
[[projects]]
name = "go.uber.org/atomic"
@ -484,19 +484,19 @@
branch = "master"
name = "golang.org/x/crypto"
packages = ["acme","acme/autocert","bcrypt","blowfish","cast5","curve25519","hkdf","nacl/secretbox","openpgp","openpgp/armor","openpgp/elgamal","openpgp/errors","openpgp/packet","openpgp/s2k","pbkdf2","poly1305","salsa20","salsa20/salsa","tea","twofish","xtea"]
revision = "0fcca4842a8d74bfddc2c96a073bd2a4d2a7a2e8"
revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["bpf","context","html","html/atom","http2","http2/hpack","idna","internal/iana","internal/socket","internal/timeseries","ipv4","lex/httplex","trace"]
revision = "d866cfc389cec985d6fda2859936a575a55a3ab6"
revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "28a7276518d399b9634904daad79e18b44d481bc"
revision = "fff93fa7cd278d84afc205751523809c464168ab"
[[projects]]
branch = "master"
@ -513,18 +513,11 @@
[[projects]]
name = "google.golang.org/grpc"
packages = [".","balancer","balancer/base","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","health","health/grpc_health_v1","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"]
revision = "f3955b8e9e244dd4dd4bc4f7b7a23a8445400a76"
version = "v1.9.0"
revision = "7cea4cc846bcf00cbb27595b07da5de875ef7de9"
version = "v1.9.1"
[[projects]]
name = "gopkg.in/alecthomas/kingpin.v2"
packages = ["."]
revision = "947dcec5ba9c011838740e680966fd7087a71d0d"
version = "v2.2.6"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "dd3b3341036bb95c8a409729fa12b897e6515c32cfaae8218cf27d60ad1a3b07"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -1,138 +0,0 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
branch = "master"
name = "github.com/Xe/gopreload"
[[constraint]]
name = "github.com/Xe/ln"
version = "0.1.0"
[[constraint]]
branch = "master"
name = "github.com/Xe/uuid"
[[constraint]]
branch = "master"
name = "github.com/Xe/x"
[[constraint]]
name = "github.com/asdine/storm"
version = "2.0.0"
[[constraint]]
branch = "master"
name = "github.com/brandur/simplebox"
[[constraint]]
name = "github.com/caarlos0/env"
version = "3.2.0"
[[constraint]]
branch = "master"
name = "github.com/dgryski/go-failure"
[[constraint]]
branch = "master"
name = "github.com/dickeyxxx/netrc"
[[constraint]]
branch = "master"
name = "github.com/facebookgo/flagenv"
[[constraint]]
branch = "master"
name = "github.com/golang/protobuf"
[[constraint]]
name = "github.com/google/gops"
version = "0.3.2"
[[constraint]]
name = "github.com/hashicorp/terraform"
version = "0.11.1"
[[constraint]]
name = "github.com/joho/godotenv"
version = "1.2.0"
[[constraint]]
branch = "master"
name = "github.com/jtolds/qod"
[[constraint]]
branch = "master"
name = "github.com/kr/pretty"
[[constraint]]
name = "github.com/lucas-clemente/quic-go"
version = "0.6.0"
[[constraint]]
name = "github.com/magefile/mage"
version = "2.0.1"
[[constraint]]
branch = "master"
name = "github.com/mtneug/pkg"
[[constraint]]
branch = "master"
name = "github.com/olekukonko/tablewriter"
[[constraint]]
name = "github.com/pkg/errors"
version = "0.8.0"
[[constraint]]
branch = "master"
name = "github.com/streamrail/concurrent-map"
[[constraint]]
name = "github.com/xtaci/kcp-go"
version = "3.23.0"
[[constraint]]
name = "github.com/xtaci/smux"
version = "1.0.6"
[[constraint]]
name = "go.uber.org/atomic"
version = "1.3.1"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.9.0"
[[constraint]]
name = "gopkg.in/alecthomas/kingpin.v2"
version = "2.2.6"

9
cmd/mage/main.go Normal file
View File

@ -0,0 +1,9 @@
package main
import (
"os"
"github.com/magefile/mage/mage"
)
// main hands control to the vendored mage build-tool runner and
// propagates its exit status to the operating system.
func main() {
	status := mage.Main()
	os.Exit(status)
}

View File

@ -115,6 +115,7 @@ func extractField(value *reflect.Value, field *reflect.StructField, m *structCon
switch tag {
case "id":
f.IsID = true
f.Index = tagUniqueIdx
case tagUniqueIdx, tagIdx:
f.Index = tag
case tagInline:
@ -163,6 +164,7 @@ func extractField(value *reflect.Value, field *reflect.StructField, m *structCon
if m.ID == nil && field.Name == "ID" {
if f == nil {
f = &fieldConfig{
Index: tagUniqueIdx,
Name: field.Name,
IsZero: isZero(value),
IsInteger: isInteger(value),

View File

@ -45,7 +45,7 @@ func TestExtractUniqueTags(t *testing.T) {
require.False(t, infos.ID.IsZero)
require.Equal(t, "ClassicUnique", infos.Name)
require.Len(t, allByType(infos, "index"), 0)
require.Len(t, allByType(infos, "unique"), 4)
require.Len(t, allByType(infos, "unique"), 5)
}
func TestExtractIndexTags(t *testing.T) {
@ -58,7 +58,7 @@ func TestExtractIndexTags(t *testing.T) {
require.False(t, infos.ID.IsZero)
require.Equal(t, "ClassicIndex", infos.Name)
require.Len(t, allByType(infos, "index"), 5)
require.Len(t, allByType(infos, "unique"), 0)
require.Len(t, allByType(infos, "unique"), 1)
}
func TestExtractInlineWithIndex(t *testing.T) {
@ -70,7 +70,7 @@ func TestExtractInlineWithIndex(t *testing.T) {
require.NotNil(t, infos.ID)
require.Equal(t, "ClassicInline", infos.Name)
require.Len(t, allByType(infos, "index"), 3)
require.Len(t, allByType(infos, "unique"), 2)
require.Len(t, allByType(infos, "unique"), 3)
}
func TestExtractMultipleTags(t *testing.T) {
@ -90,7 +90,7 @@ func TestExtractMultipleTags(t *testing.T) {
require.NotNil(t, infos.ID)
require.Equal(t, "User", infos.Name)
require.Len(t, allByType(infos, "index"), 2)
require.Len(t, allByType(infos, "unique"), 1)
require.Len(t, allByType(infos, "unique"), 2)
require.True(t, infos.Fields["Age"].Increment)
require.Equal(t, int64(1), infos.Fields["Age"].IncrementStart)

View File

@ -9,8 +9,8 @@ import (
"github.com/coreos/bbolt"
)
// A finder can fetch types from BoltDB
type finder interface {
// A Finder can fetch types from BoltDB.
type Finder interface {
// One returns one record by the specified index
One(fieldName string, value interface{}, to interface{}) error

View File

@ -685,3 +685,20 @@ func TestPrefix(t *testing.T) {
err = db.Prefix("Group", "group3", &users)
require.Equal(t, ErrNotFound, err)
}
// TestPrefixWithID verifies that Prefix works when the scanned field is
// the bucket's ID field itself (a regression case: ID is stored as the
// bucket key, not as a separate index).
func TestPrefixWithID(t *testing.T) {
db, cleanup := createDB(t)
defer cleanup()
type User struct {
ID string
}
// Both IDs share the prefix "1"; "10" must also match.
require.NoError(t, db.Save(&User{ID: "1"}))
require.NoError(t, db.Save(&User{ID: "10"}))
var users []User
require.NoError(t, db.Prefix("ID", "1", &users))
// Expect exactly the two records whose ID starts with "1".
require.Len(t, users, 2)
}

View File

@ -6,8 +6,8 @@ import (
"github.com/coreos/bbolt"
)
// keyValueStore can store and fetch values by key
type keyValueStore interface {
// KeyValueStore can store and fetch values by key
type KeyValueStore interface {
// Get a value from a bucket
Get(bucketName string, key interface{}, to interface{}) error
// Set a key/value pair into a bucket

View File

@ -7,10 +7,11 @@ import (
// A Node in Storm represents the API to a BoltDB bucket.
type Node interface {
tx
typeStore
keyValueStore
bucketScanner
Tx
TypeStore
KeyValueStore
BucketScanner
// From returns a new Storm node with a new bucket root below the current.
// All DB operations on the new node will be executed relative to this bucket.
From(addend ...string) Node

View File

@ -6,8 +6,8 @@ import (
"github.com/coreos/bbolt"
)
// A bucketScanner scans a Node for a list of buckets
type bucketScanner interface {
// A BucketScanner scans a Node for a list of buckets
type BucketScanner interface {
// PrefixScan scans the root buckets for keys matching the given prefix.
PrefixScan(prefix string) []Node
// PrefixScan scans the buckets in this node for keys matching the given prefix.

View File

@ -9,9 +9,9 @@ import (
"github.com/coreos/bbolt"
)
// typeStore stores user defined types in BoltDB
type typeStore interface {
finder
// TypeStore stores user defined types in BoltDB.
type TypeStore interface {
Finder
// Init creates the indexes and buckets for a given structure
Init(data interface{}) error

View File

@ -2,8 +2,8 @@ package storm
import "github.com/coreos/bbolt"
// tx is a transaction
type tx interface {
// Tx is a transaction.
type Tx interface {
// Commit writes all changes to disk.
Commit() error

View File

@ -1,3 +1,65 @@
Release v1.12.62 (2018-01-15)
===
### Service Client Updates
* `aws/endpoints`: Updated Regions and Endpoints metadata.
* `service/lambda`: Updates service API and documentation
* Support for creating Lambda Functions using 'dotnetcore2.0' and 'go1.x'.
Release v1.12.61 (2018-01-12)
===
### Service Client Updates
* `service/glue`: Updates service API and documentation
* Support is added to generate ETL scripts in Scala which can now be run by AWS Glue ETL jobs. In addition, the trigger API now supports firing when any conditions are met (in addition to all conditions). Also, jobs can be triggered based on a "failed" or "stopped" job run (in addition to a "succeeded" job run).
Release v1.12.60 (2018-01-11)
===
### Service Client Updates
* `service/elasticloadbalancing`: Updates service API and documentation
* `service/elasticloadbalancingv2`: Updates service API and documentation
* `service/rds`: Updates service API and documentation
* Read Replicas for Amazon RDS for MySQL, MariaDB, and PostgreSQL now support Multi-AZ deployments.Amazon RDS Read Replicas enable you to create one or more read-only copies of your database instance within the same AWS Region or in a different AWS Region. Updates made to the source database are asynchronously copied to the Read Replicas. In addition to providing scalability for read-heavy workloads, you can choose to promote a Read Replica to become standalone a DB instance when needed.Amazon RDS Multi-AZ Deployments provide enhanced availability for database instances within a single AWS Region. With Multi-AZ, your data is synchronously replicated to a standby in a different Availability Zone (AZ). In case of an infrastructure failure, Amazon RDS performs an automatic failover to the standby, minimizing disruption to your applications.You can now combine Read Replicas with Multi-AZ as part of a disaster recovery strategy for your production databases. A well-designed and tested plan is critical for maintaining business continuity after a disaster. Since Read Replicas can also be created in different regions than the source database, your Read Replica can be promoted to become the new production database in case of a regional disruption.You can also combine Read Replicas with Multi-AZ for your database engine upgrade process. You can create a Read Replica of your production database instance and upgrade it to a new database engine version. When the upgrade is complete, you can stop applications, promote the Read Replica to a standalone database instance and switch over your applications. Since the database instance is already a Multi-AZ deployment, no additional steps are needed.For more information, see the Amazon RDS User Guide.
* `service/ssm`: Updates service documentation
* Updates documentation for the HierarchyLevelLimitExceededException error.
Release v1.12.59 (2018-01-09)
===
### Service Client Updates
* `service/kms`: Updates service documentation
* Documentation updates for AWS KMS
Release v1.12.58 (2018-01-09)
===
### Service Client Updates
* `service/ds`: Updates service API and documentation
* On October 24 we introduced AWS Directory Service for Microsoft Active Directory (Standard Edition), also known as AWS Microsoft AD (Standard Edition), which is a managed Microsoft Active Directory (AD) that is optimized for small and midsize businesses (SMBs). With this SDK release, you can now create an AWS Microsoft AD directory using API. This enables you to run typical SMB workloads using a cost-effective, highly available, and managed Microsoft AD in the AWS Cloud.
Release v1.12.57 (2018-01-08)
===
### Service Client Updates
* `service/codedeploy`: Updates service API and documentation
* The AWS CodeDeploy API was updated to support DeleteGitHubAccountToken, a new method that deletes a GitHub account connection.
* `service/discovery`: Updates service API and documentation
* Documentation updates for AWS Application Discovery Service.
* `service/route53`: Updates service API and documentation
* This release adds an exception to the CreateTrafficPolicyVersion API operation.
Release v1.12.56 (2018-01-05)
===
### Service Client Updates
* `service/inspector`: Updates service API, documentation, and examples
* Added 2 new attributes to the DescribeAssessmentTemplate response, indicating the total number of assessment runs and last assessment run ARN (if present.)
* `service/snowball`: Updates service documentation
* Documentation updates for snowball
* `service/ssm`: Updates service documentation
* Documentation updates for ssm
Release v1.12.55 (2018-01-02)
===

View File

@ -740,6 +740,7 @@ var awsPartition = partition{
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-2": endpoint{},
"eu-west-3": endpoint{},
"sa-east-1": endpoint{},
"us-east-1": endpoint{},
"us-east-2": endpoint{},
@ -1636,6 +1637,7 @@ var awsPartition = partition{
"ap-northeast-2": endpoint{},
"ap-south-1": endpoint{},
"ap-southeast-2": endpoint{},
"ca-central-1": endpoint{},
"eu-central-1": endpoint{},
"eu-west-1": endpoint{},
"eu-west-3": endpoint{},

View File

@ -5,4 +5,4 @@ package aws
const SDKName = "aws-sdk-go"
// SDKVersion is the version of this SDK
const SDKVersion = "1.12.55"
const SDKVersion = "1.12.62"

View File

@ -103,6 +103,7 @@ func (state ClientStateStart) Next(hr handshakeMessageReader) (HandshakeState, [
// Construct base ClientHello
ch := &ClientHelloBody{
LegacyVersion: wireVersion(state.hsCtx.hIn),
CipherSuites: state.Caps.CipherSuites,
}
_, err := prng.Read(ch.Random[:])

View File

@ -9,6 +9,7 @@ const (
supportedVersion uint16 = 0x7f16 // draft-22
tls12Version uint16 = 0x0303
tls10Version uint16 = 0x0301
dtls12WireVersion uint16 = 0xfefd
)
var (

View File

@ -1,7 +1,28 @@
package mint
import (
"fmt"
)
// This file is a placeholder. DTLS-specific stuff (timer management,
// ACKs, retransmits, etc. will eventually go here.
const (
initialMtu = 1200
)
// wireVersion returns the legacy version value to place on the wire for
// the given handshake layer: the DTLS 1.2 wire value when running over
// datagrams, and the TLS 1.2 value otherwise.
func wireVersion(h *HandshakeLayer) uint16 {
	if !h.datagram {
		return tls12Version
	}
	return dtls12WireVersion
}
// dtlsConvertVersion maps a TLS protocol version to its DTLS wire
// equivalent. Only TLS 1.2 and TLS 1.0 are meaningful here; any other
// input indicates an internal error and panics.
func dtlsConvertVersion(version uint16) uint16 {
	switch version {
	case tls12Version:
		return dtls12WireVersion
	case tls10Version:
		return 0xfeff // DTLS 1.0 wire value
	default:
		panic(fmt.Sprintf("Internal error, unexpected version=%d", version))
	}
}

View File

@ -77,8 +77,6 @@ func (hm HandshakeMessage) ToBody() (HandshakeMessageBody, error) {
body = new(ClientHelloBody)
case HandshakeTypeServerHello:
body = new(ServerHelloBody)
case HandshakeTypeHelloRetryRequest:
body = new(HelloRetryRequestBody)
case HandshakeTypeEncryptedExtensions:
body = new(EncryptedExtensionsBody)
case HandshakeTypeCertificate:

View File

@ -25,14 +25,14 @@ type HandshakeMessageBody interface {
// Extension extensions<0..2^16-1>;
// } ClientHello;
type ClientHelloBody struct {
// Omitted: clientVersion
LegacyVersion uint16
Random [32]byte
LegacySessionID []byte
CipherSuites []CipherSuite
Extensions ExtensionList
}
type clientHelloBodyInner struct {
type clientHelloBodyInnerTLS struct {
LegacyVersion uint16
Random [32]byte
LegacySessionID []byte `tls:"head=1,max=32"`
@ -41,41 +41,86 @@ type clientHelloBodyInner struct {
Extensions []Extension `tls:"head=2"`
}
type clientHelloBodyInnerDTLS struct {
LegacyVersion uint16
Random [32]byte
LegacySessionID []byte `tls:"head=1,max=32"`
EmptyCookie uint8
CipherSuites []CipherSuite `tls:"head=2,min=2"`
LegacyCompressionMethods []byte `tls:"head=1,min=1"`
Extensions []Extension `tls:"head=2"`
}
// Type reports the handshake message type for a ClientHello, satisfying
// the HandshakeMessageBody interface.
func (ch ClientHelloBody) Type() HandshakeType {
return HandshakeTypeClientHello
}
// Marshal encodes the ClientHello, selecting the TLS or DTLS inner
// layout based on LegacyVersion (DTLS adds an empty-cookie field).
// NOTE(review): this span is rendered from a diff; the next two lines
// (syntax.Marshal(clientHelloBodyInner{ / LegacyVersion: tls12Version,)
// appear to be the *removed* side of the hunk left interleaved with the
// added code — verify against the upstream mint source before relying on
// this text as-is.
func (ch ClientHelloBody) Marshal() ([]byte, error) {
return syntax.Marshal(clientHelloBodyInner{
LegacyVersion: tls12Version,
if ch.LegacyVersion == tls12Version {
return syntax.Marshal(clientHelloBodyInnerTLS{
LegacyVersion: ch.LegacyVersion,
Random: ch.Random,
// LegacySessionID is always empty: this stack only speaks TLS 1.3.
LegacySessionID: []byte{},
CipherSuites: ch.CipherSuites,
// Single null compression method, as required for TLS 1.3.
LegacyCompressionMethods: []byte{0},
Extensions: ch.Extensions,
})
} else {
// DTLS layout differs only by the EmptyCookie field.
return syntax.Marshal(clientHelloBodyInnerDTLS{
LegacyVersion: ch.LegacyVersion,
Random: ch.Random,
LegacySessionID: []byte{},
CipherSuites: ch.CipherSuites,
LegacyCompressionMethods: []byte{0},
Extensions: ch.Extensions,
})
}
}
// Unmarshal decodes a ClientHello body, choosing the TLS or DTLS inner
// layout based on ch.LegacyVersion (which the caller pre-sets from the
// handshake layer; see the note below about the zero value).
// NOTE(review): this span is rendered from a diff; the next two lines
// (var inner clientHelloBodyInner / read, err := syntax.Unmarshal...)
// look like the *removed* side of the hunk left interleaved with the
// added code — as written, read/err would be declared twice. Verify
// against the upstream mint source.
func (ch *ClientHelloBody) Unmarshal(data []byte) (int, error) {
var inner clientHelloBodyInner
read, err := syntax.Unmarshal(data, &inner)
var read int
var err error
// Note that this might be 0, in which case we do TLS. That
// makes the tests easier.
if ch.LegacyVersion != dtls12WireVersion {
var inner clientHelloBodyInnerTLS
read, err = syntax.Unmarshal(data, &inner)
if err != nil {
return 0, err
}
// We are strict about these things because we only support 1.3
if inner.LegacyVersion != tls12Version {
return 0, fmt.Errorf("tls.clienthello: Incorrect version number")
}
// Exactly one compression method (null) is legal in TLS 1.3.
if len(inner.LegacyCompressionMethods) != 1 || inner.LegacyCompressionMethods[0] != 0 {
return 0, fmt.Errorf("tls.clienthello: Invalid compression method")
}
ch.LegacyVersion = inner.LegacyVersion
ch.Random = inner.Random
ch.LegacySessionID = inner.LegacySessionID
ch.CipherSuites = inner.CipherSuites
ch.Extensions = inner.Extensions
} else {
var inner clientHelloBodyInnerDTLS
read, err = syntax.Unmarshal(data, &inner)
if err != nil {
return 0, err
}
// DTLS 1.3-style ClientHello must carry an empty (zero-length) cookie.
if inner.EmptyCookie != 0 {
return 0, fmt.Errorf("tls.clienthello: Invalid cookie")
}
if len(inner.LegacyCompressionMethods) != 1 || inner.LegacyCompressionMethods[0] != 0 {
return 0, fmt.Errorf("tls.clienthello: Invalid compression method")
}
ch.LegacyVersion = inner.LegacyVersion
ch.Random = inner.Random
ch.LegacySessionID = inner.LegacySessionID
ch.CipherSuites = inner.CipherSuites
ch.Extensions = inner.Extensions
}
return read, nil
}
@ -120,29 +165,6 @@ func (ch ClientHelloBody) Truncated() ([]byte, error) {
return chData[:chLen-binderLen], nil
}
// struct {
// ProtocolVersion server_version;
// CipherSuite cipher_suite;
// Extension extensions<2..2^16-1>;
// } HelloRetryRequest;
type HelloRetryRequestBody struct {
Version uint16
CipherSuite CipherSuite
Extensions ExtensionList `tls:"head=2,min=2"`
}
func (hrr HelloRetryRequestBody) Type() HandshakeType {
return HandshakeTypeHelloRetryRequest
}
func (hrr HelloRetryRequestBody) Marshal() ([]byte, error) {
return syntax.Marshal(hrr)
}
func (hrr *HelloRetryRequestBody) Unmarshal(data []byte) (int, error) {
return syntax.Unmarshal(data, hrr)
}
// struct {
// ProtocolVersion legacy_version = 0x0303; /* TLS v1.2 */
// Random random;

View File

@ -34,6 +34,7 @@ var (
0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37}
chCipherSuites = []CipherSuite{0x0001, 0x0002, 0x0003}
chValidIn = ClientHelloBody{
LegacyVersion: tls12Version,
Random: helloRandom,
CipherSuites: chCipherSuites,
Extensions: extListValidIn,
@ -49,6 +50,7 @@ var (
chTruncHex = "01000062" + "0303" + hex.EncodeToString(helloRandom[:]) +
"00" + "0006000100020003" + "0100" + "00330029002f000a00040102030405060708"
chTruncValid = ClientHelloBody{
LegacyVersion: tls12Version,
Random: helloRandom,
CipherSuites: chCipherSuites,
Extensions: []Extension{
@ -60,11 +62,13 @@ var (
}
chTruncInvalid = ClientHelloBody{}
chTruncNoExt = ClientHelloBody{
LegacyVersion: tls12Version,
Random: helloRandom,
CipherSuites: chCipherSuites,
Extensions: []Extension{},
}
chTruncNoPSK = ClientHelloBody{
LegacyVersion: tls12Version,
Random: helloRandom,
CipherSuites: chCipherSuites,
Extensions: []Extension{
@ -72,6 +76,7 @@ var (
},
}
chTruncBadPSK = ClientHelloBody{
LegacyVersion: tls12Version,
Random: helloRandom,
CipherSuites: chCipherSuites,
Extensions: []Extension{
@ -79,16 +84,6 @@ var (
},
}
// HelloRetryRequest test cases
hrrValidIn = HelloRetryRequestBody{
Version: supportedVersion,
CipherSuite: 0x0001,
Extensions: extListValidIn,
}
hrrEmptyIn = HelloRetryRequestBody{}
hrrValidHex = supportedVersionHex + "0001" + extListValidHex
hrrEmptyHex = supportedVersionHex + "0001" + "0000"
// ServerHello test cases
shValidIn = ServerHelloBody{
Version: tls12Version,
@ -290,12 +285,6 @@ func TestClientHelloMarshalUnmarshal(t *testing.T) {
_, err = ch.Unmarshal(chValid[:fixedClientHelloBodyLen-1])
assertError(t, err, "Unmarshaled a ClientHello below the min length")
// Test unmarshal failure on wrong version
chValid[1]--
_, err = ch.Unmarshal(chValid)
assertError(t, err, "Unmarshaled a ClientHello with the wrong version")
chValid[1]++
// Test unmarshal failure on ciphersuite size overflow
chValid[35] = 0xFF
_, err = ch.Unmarshal(chValid)
@ -352,34 +341,6 @@ func TestClientHelloTruncate(t *testing.T) {
assertError(t, err, "Truncated a ClientHello with a mal-formed PSK")
}
func TestHelloRetryRequestMarshalUnmarshal(t *testing.T) {
hrrValid := unhex(hrrValidHex)
hrrEmpty := unhex(hrrEmptyHex)
// Test correctness of handshake type
assertEquals(t, (HelloRetryRequestBody{}).Type(), HandshakeTypeHelloRetryRequest)
// Test successful marshal
out, err := hrrValidIn.Marshal()
assertNotError(t, err, "Failed to marshal a valid HelloRetryRequest")
assertByteEquals(t, out, hrrValid)
// Test marshal failure with no extensions present
out, err = hrrEmptyIn.Marshal()
assertError(t, err, "Marshaled HelloRetryRequest with no extensions")
// Test successful unmarshal
var hrr HelloRetryRequestBody
read, err := hrr.Unmarshal(hrrValid)
assertNotError(t, err, "Failed to unmarshal a valid HelloRetryRequest")
assertEquals(t, read, len(hrrValid))
assertDeepEquals(t, hrr, hrrValidIn)
// Test unmarshal failure with no extensions present
read, err = hrr.Unmarshal(hrrEmpty)
assertError(t, err, "Unmarshaled a HelloRetryRequest with no extensions")
}
func TestServerHelloMarshalUnmarshal(t *testing.T) {
shValid := unhex(shValidHex)
shEmpty := unhex(shEmptyHex)

View File

@ -119,6 +119,15 @@ func (r *RecordLayer) Rekey(epoch Epoch, factory aeadFactory, key []byte, iv []b
return nil
}
// formatSeq returns a copy of the record sequence number. In datagram
// (DTLS) mode the top two bytes of the copy are overwritten with the
// current epoch, matching the DTLS record-number layout.
func (c *cipherState) formatSeq(datagram bool) []byte {
	out := make([]byte, len(c.seq))
	copy(out, c.seq)
	if datagram {
		out[0] = byte(c.epoch >> 8)
		out[1] = byte(c.epoch & 0xff)
	}
	return out
}
func (c *cipherState) computeNonce(seq []byte) []byte {
nonce := make([]byte, len(c.iv))
copy(nonce, c.iv)
@ -143,9 +152,9 @@ func (c *cipherState) incrementSequenceNumber() {
if i < 0 {
// Not allowed to let sequence number wrap.
// Instead, must renegotiate before it does.
// Not likely enough to bothec.
// Not likely enough to bother.
// TODO(ekr@rtfm.com): Check for DTLS here
// because the limit is soonec.
// because the limit is sooner.
panic("TLS: sequence number wraparound")
}
}
@ -157,7 +166,8 @@ func (c *cipherState) overhead() int {
return c.cipher.Overhead()
}
func (r *RecordLayer) encrypt(cipher *cipherState, pt *TLSPlaintext, padLen int) *TLSPlaintext {
func (r *RecordLayer) encrypt(cipher *cipherState, seq []byte, pt *TLSPlaintext, padLen int) *TLSPlaintext {
logf(logTypeIO, "Encrypt seq=[%x]", seq)
// Expand the fragment to hold contentType, padding, and overhead
originalLen := len(pt.fragment)
plaintextLen := originalLen + 1 + padLen
@ -165,6 +175,7 @@ func (r *RecordLayer) encrypt(cipher *cipherState, pt *TLSPlaintext, padLen int)
// Assemble the revised plaintext
out := &TLSPlaintext{
contentType: RecordTypeApplicationData,
fragment: make([]byte, ciphertextLen),
}
@ -176,11 +187,12 @@ func (r *RecordLayer) encrypt(cipher *cipherState, pt *TLSPlaintext, padLen int)
// Encrypt the fragment
payload := out.fragment[:plaintextLen]
cipher.cipher.Seal(payload[:0], cipher.computeNonce(cipher.seq), payload, nil)
cipher.cipher.Seal(payload[:0], cipher.computeNonce(seq), payload, nil)
return out
}
func (r *RecordLayer) decrypt(pt *TLSPlaintext, seq []byte) (*TLSPlaintext, int, error) {
logf(logTypeIO, "Decrypt seq=[%x]", seq)
if len(pt.fragment) < r.cipher.overhead() {
msg := fmt.Sprintf("tls.record.decrypt: Record too short [%d] < [%d]", len(pt.fragment), r.cipher.overhead())
return nil, 0, DecryptError(msg)
@ -312,6 +324,8 @@ func (r *RecordLayer) nextRecord() (*TLSPlaintext, error) {
if r.datagram {
seq = header[3:11]
}
// TODO(ekr@rtfm.com): Handle the wrong epoch.
// TODO(ekr@rtfm.com): Handle duplicates.
logf(logTypeIO, "RecordLayer.ReadRecord epoch=[%s] seq=[%x] [%d] ciphertext=[%x]", cipher.epoch.label(), seq, pt.contentType, pt.fragment)
pt, _, err = r.decrypt(pt, seq)
if err != nil {
@ -341,9 +355,11 @@ func (r *RecordLayer) WriteRecordWithPadding(pt *TLSPlaintext, padLen int) error
}
func (r *RecordLayer) writeRecordWithPadding(pt *TLSPlaintext, cipher *cipherState, padLen int) error {
seq := cipher.formatSeq(r.datagram)
if cipher.cipher != nil {
logf(logTypeIO, "RecordLayer.WriteRecord epoch=[%s] seq=[%x] [%d] plaintext=[%x]", cipher.epoch.label(), cipher.seq, pt.contentType, pt.fragment)
pt = r.encrypt(cipher, pt, padLen)
pt = r.encrypt(cipher, seq, pt, padLen)
} else if padLen > 0 {
return fmt.Errorf("tls.record: Padding can only be done on encrypted records")
}
@ -354,16 +370,17 @@ func (r *RecordLayer) writeRecordWithPadding(pt *TLSPlaintext, cipher *cipherSta
length := len(pt.fragment)
var header []byte
if !r.datagram {
header = []byte{byte(pt.contentType),
byte(r.version >> 8), byte(r.version & 0xff),
byte(length >> 8), byte(length)}
} else {
// TODO(ekr@rtfm.com): Double check version
seq := cipher.seq
header = []byte{byte(pt.contentType), 0xfe, 0xff,
0x00, 0x00, // TODO(ekr@rtfm.com): double-check epoch
seq[2], seq[3], seq[4], seq[5], seq[6], seq[7],
version := dtlsConvertVersion(r.version)
header = []byte{byte(pt.contentType),
byte(version >> 8), byte(version & 0xff),
seq[0], seq[1], seq[2], seq[3],
seq[4], seq[5], seq[6], seq[7],
byte(length >> 8), byte(length)}
}
record := append(header, pt.fragment...)

View File

@ -256,7 +256,9 @@ func TestReadWriteDTLS(t *testing.T) {
b := bytes.NewBuffer(nil)
out := NewRecordLayerDTLS(b)
out.SetVersion(tls12Version)
in := NewRecordLayerDTLS(b)
in.SetVersion(tls12Version)
// Unencrypted
ptIn := &TLSPlaintext{

View File

@ -92,12 +92,18 @@ func (state ServerStateStart) Next(hr handshakeMessageReader) (HandshakeState, [
return nil, nil, AlertUnexpectedMessage
}
ch := &ClientHelloBody{}
ch := &ClientHelloBody{LegacyVersion: wireVersion(state.hsCtx.hIn)}
if err := safeUnmarshal(ch, hm.body); err != nil {
logf(logTypeHandshake, "[ServerStateStart] Error decoding message: %v", err)
return nil, nil, AlertDecodeError
}
// We are strict about these things because we only support 1.3
if ch.LegacyVersion != wireVersion(state.hsCtx.hIn) {
logf(logTypeHandshake, "[ServerStateStart] Invalid version number: %v", ch.LegacyVersion)
return nil, nil, AlertDecodeError
}
clientHello := hm
connParams := ConnectionParameters{}

View File

@ -93,6 +93,7 @@ func DialWithDialer(dialer *net.Dialer, network, addr string, config *Config) (*
if config != nil && config.NonBlocking {
return nil, errors.New("dialing not possible in non-blocking mode")
}
// We want the Timeout and Deadline values from dialer to cover the
// whole process: TCP connection and TLS handshake. This means that we
// also need to start our own timers now.
@ -127,16 +128,20 @@ func DialWithDialer(dialer *net.Dialer, network, addr string, config *Config) (*
if config == nil {
config = &Config{}
} else {
config = config.Clone()
}
// If no ServerName is set, infer the ServerName
// from the hostname we're connecting to.
if config.ServerName == "" {
// Make a copy to avoid polluting argument or default.
c := config.Clone()
c.ServerName = hostname
config = c
config.ServerName = hostname
}
// Set up DTLS as needed.
config.UseDTLS = (network == "udp")
conn := Client(rawConn, config)
if timeout == 0 {

View File

@ -285,7 +285,7 @@ be used automatically.
* `aws_access_key_id` (required) - Minio access key.
* `aws_access_key_secret` (required) - Minio access key secret.
* `region` (optional - defaults to us-east-1) - Region identifier to use.
* `version` (optional - fefaults to Minio default) - Configuration file format.
* `version` (optional - defaults to Minio default) - Configuration file format.
#### S3 Bucket Examples

View File

@ -66,6 +66,10 @@ type ServeConfig struct {
// the gRPC health checking service. This is not optional since go-plugin
// relies on this to implement Ping().
GRPCServer func([]grpc.ServerOption) *grpc.Server
// Logger is used to pass a logger into the server. If none is provided the
// server will create a default logger.
Logger hclog.Logger
}
// Protocol returns the protocol that this server should speak.
@ -106,12 +110,15 @@ func Serve(opts *ServeConfig) {
// Logging goes to the original stderr
log.SetOutput(os.Stderr)
logger := opts.Logger
if logger == nil {
// internal logger to os.Stderr
logger := hclog.New(&hclog.LoggerOptions{
logger = hclog.New(&hclog.LoggerOptions{
Level: hclog.Trace,
Output: os.Stderr,
JSONFormat: true,
})
}
// Create our new stdout, stderr files. These will override our built-in
// stdout/stderr so that it works across the stream boundary.

View File

@ -2,7 +2,6 @@ package hcl
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
@ -43,7 +42,7 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
return errors.New("nil diagnostic")
}
var colorCode, resetCode string
var colorCode, highlightCode, resetCode string
if w.color {
switch diag.Severity {
case DiagError:
@ -52,6 +51,7 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
colorCode = "\x1b[33m"
}
resetCode = "\x1b[0m"
highlightCode = "\x1b[1;4m"
}
var severityStr string
@ -68,24 +68,31 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
fmt.Fprintf(w.wr, "%s%s%s: %s\n\n", colorCode, severityStr, resetCode, diag.Summary)
if diag.Subject != nil {
snipRange := *diag.Subject
highlightRange := snipRange
if diag.Context != nil {
// Show enough of the source code to include both the subject
// and context ranges, which overlap in all reasonable
// situations.
snipRange = RangeOver(snipRange, *diag.Context)
}
// We can't illustrate an empty range, so we'll turn such ranges into
// single-character ranges, which might not be totally valid (may point
// off the end of a line, or off the end of the file) but are good
// enough for the bounds checks we do below.
if snipRange.Empty() {
snipRange.End.Byte++
snipRange.End.Column++
}
if highlightRange.Empty() {
highlightRange.End.Byte++
highlightRange.End.Column++
}
file := w.files[diag.Subject.Filename]
if file == nil || file.Bytes == nil {
fmt.Fprintf(w.wr, " on %s line %d:\n (source code not available)\n\n", diag.Subject.Filename, diag.Subject.Start.Line)
} else {
src := file.Bytes
r := bytes.NewReader(src)
sc := bufio.NewScanner(r)
sc.Split(bufio.ScanLines)
var startLine, endLine int
if diag.Context != nil {
startLine = diag.Context.Start.Line
endLine = diag.Context.End.Line
} else {
startLine = diag.Subject.Start.Line
endLine = diag.Subject.End.Line
}
var contextLine string
if diag.Subject != nil {
@ -95,35 +102,33 @@ func (w *diagnosticTextWriter) WriteDiagnostic(diag *Diagnostic) error {
}
}
li := 1
var ls string
for sc.Scan() {
ls = sc.Text()
if li == startLine {
break
}
li++
}
fmt.Fprintf(w.wr, " on %s line %d%s:\n", diag.Subject.Filename, diag.Subject.Start.Line, contextLine)
// TODO: Generate markers for the specific characters that are in the Context and Subject ranges.
// For now, we just print out the lines.
src := file.Bytes
sc := NewRangeScanner(src, diag.Subject.Filename, bufio.ScanLines)
fmt.Fprintf(w.wr, "%4d: %s\n", li, ls)
if endLine > li {
for sc.Scan() {
ls = sc.Text()
li++
fmt.Fprintf(w.wr, "%4d: %s\n", li, ls)
if li == endLine {
break
lineRange := sc.Range()
if !lineRange.Overlaps(snipRange) {
continue
}
beforeRange, highlightedRange, afterRange := lineRange.PartitionAround(highlightRange)
if highlightedRange.Empty() {
fmt.Fprintf(w.wr, "%4d: %s\n", lineRange.Start.Line, sc.Bytes())
} else {
before := beforeRange.SliceBytes(src)
highlighted := highlightedRange.SliceBytes(src)
after := afterRange.SliceBytes(src)
fmt.Fprintf(
w.wr, "%4d: %s%s%s%s%s\n",
lineRange.Start.Line,
before,
highlightCode, highlighted, resetCode,
after,
)
}
}
w.wr.Write([]byte{'\n'})

View File

@ -45,10 +45,12 @@ All splines must be pre-reticulated.
Detail: `"baz" is not a supported top-level attribute. Did you mean "bam"?`,
Subject: &Range{
Start: Pos{
Byte: 16,
Column: 1,
Line: 3,
},
End: Pos{
Byte: 19,
Column: 4,
Line: 3,
},
@ -71,10 +73,12 @@ attribute. Did you mean "bam"?
Detail: `"pizza" is not a supported attribute. Did you mean "pizzetta"?`,
Subject: &Range{
Start: Pos{
Byte: 42,
Column: 3,
Line: 5,
},
End: Pos{
Byte: 47,
Column: 8,
Line: 5,
},
@ -83,10 +87,12 @@ attribute. Did you mean "bam"?
// whether we're able to show a multi-line context when needed.
Context: &Range{
Start: Pos{
Byte: 24,
Column: 1,
Line: 4,
},
End: Pos{
Byte: 60,
Column: 2,
Line: 6,
},

30
vendor/github.com/hashicorp/hcl2/hcl/expr_list.go generated vendored Normal file
View File

@ -0,0 +1,30 @@
package hcl
// ExprList tests if the given expression is a static list construct and,
// if so, extracts the expressions that represent the list elements.
// If the given expression is not a static list, error diagnostics are
// returned.
//
// A particular Expression implementation can support this function by
// offering a method called ExprList that takes no arguments and returns
// []Expression. This method should return nil if a static list cannot
// be extracted.
func ExprList(expr Expression) ([]Expression, Diagnostics) {
	type exprList interface {
		ExprList() []Expression
	}

	// Delegate to the expression itself when it advertises support.
	exL, supported := expr.(exprList)
	if supported {
		if elems := exL.ExprList(); elems != nil {
			return elems, nil
		}
	}

	diag := &Diagnostic{
		Severity: DiagError,
		Summary:  "Invalid expression",
		Detail:   "A static list expression is required.",
		Subject:  expr.StartRange().Ptr(),
	}
	return nil, Diagnostics{diag}
}

View File

@ -70,6 +70,11 @@ func (e *ScopeTraversalExpr) StartRange() hcl.Range {
return e.SrcRange
}
// AsTraversal returns the traversal this expression represents, implementing
// the discovery interface used by hcl.AbsTraversalForExpr.
func (e *ScopeTraversalExpr) AsTraversal() hcl.Traversal {
	return e.Traversal
}
// RelativeTraversalExpr is an Expression that retrieves a value from another
// value using a _relative_ traversal.
type RelativeTraversalExpr struct {
@ -539,6 +544,15 @@ func (e *TupleConsExpr) StartRange() hcl.Range {
return e.OpenRange
}
// ExprList returns the tuple's element expressions, implementing the
// discovery interface used by hcl.ExprList.
func (e *TupleConsExpr) ExprList() []hcl.Expression {
	// Each element must be re-boxed as the hcl-level interface type.
	exprs := make([]hcl.Expression, 0, len(e.Exprs))
	for _, elem := range e.Exprs {
		exprs = append(exprs, elem)
	}
	return exprs
}
type ObjectConsExpr struct {
Items []ObjectConsItem

View File

@ -1087,3 +1087,35 @@ func TestFunctionCallExprValue(t *testing.T) {
})
}
}
// TestExpressionAsTraversal verifies that a parsed reference expression can
// be recovered as an absolute traversal via hcl.AbsTraversalForExpr.
func TestExpressionAsTraversal(t *testing.T) {
	expr, _ := ParseExpression([]byte("a.b[0]"), "", hcl.Pos{})
	traversal, diags := hcl.AbsTraversalForExpr(expr)
	if len(diags) != 0 {
		// Include the diagnostics so a failure is actually debuggable.
		t.Fatalf("unexpected diagnostics: %s", diags.Error())
	}
	if len(traversal) != 3 {
		t.Fatalf("wrong traversal %#v; want length 3", traversal)
	}
	if traversal.RootName() != "a" {
		t.Fatalf("wrong root name %q; want %q", traversal.RootName(), "a")
	}
}
// TestStaticExpressionList verifies that a parsed tuple literal can be
// unpacked into its element expressions via hcl.ExprList.
func TestStaticExpressionList(t *testing.T) {
	expr, _ := ParseExpression([]byte("[0, a, true]"), "", hcl.Pos{})
	exprs, diags := hcl.ExprList(expr)
	if len(diags) != 0 {
		// Include the diagnostics so a failure is actually debuggable.
		t.Fatalf("unexpected diagnostics: %s", diags.Error())
	}
	if len(exprs) != 3 {
		t.Fatalf("wrong result %#v; want length 3", exprs)
	}
	first, ok := exprs[0].(*LiteralValueExpr)
	if !ok {
		// The package is hclsyntax; the old message referenced the stale
		// pre-rename "zclsyntax" package name.
		t.Fatalf("first expr has wrong type %T; want *hclsyntax.LiteralValueExpr", exprs[0])
	}
	if !first.Val.RawEquals(cty.Zero) {
		t.Fatalf("wrong first value %#v; want cty.Zero", first.Val)
	}
}

View File

@ -3,8 +3,8 @@ package json
import (
"fmt"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hcl2/hcl/hclsyntax"
"github.com/zclconf/go-cty/cty"
)
@ -330,8 +330,26 @@ func (e *expression) Variables() []hcl.Traversal {
switch v := e.src.(type) {
case *stringVal:
// FIXME: Once the native zcl template language parser is implemented,
// parse with that and look for variables in there too,
templateSrc := v.Value
expr, diags := hclsyntax.ParseTemplate(
[]byte(templateSrc),
v.SrcRange.Filename,
// This won't produce _exactly_ the right result, since
// the zclsyntax parser can't "see" any escapes we removed
// while parsing JSON, but it's better than nothing.
hcl.Pos{
Line: v.SrcRange.Start.Line,
// skip over the opening quote mark
Byte: v.SrcRange.Start.Byte + 1,
Column: v.SrcRange.Start.Column + 1,
},
)
if diags.HasErrors() {
return vars
}
return expr.Variables()
case *arrayVal:
for _, jsonVal := range v.Values {
@ -353,3 +371,34 @@ func (e *expression) Range() hcl.Range {
func (e *expression) StartRange() hcl.Range {
return e.src.StartRange()
}
// AsTraversal implements the discovery interface used by
// hcl.AbsTraversalForExpr.
//
// In JSON-based syntax a traversal is given as a string containing
// traversal syntax as defined by hclsyntax.ParseTraversalAbs.
func (e *expression) AsTraversal() hcl.Traversal {
	str, isString := e.src.(*stringVal)
	if !isString {
		// Only strings can encode traversals in the JSON syntax.
		return nil
	}
	traversal, diags := hclsyntax.ParseTraversalAbs([]byte(str.Value), str.SrcRange.Filename, str.SrcRange.Start)
	if diags.HasErrors() {
		return nil
	}
	return traversal
}
// ExprList implements the discovery interface used by hcl.ExprList,
// unpacking a JSON array into one expression per element. Non-array
// values yield nil.
func (e *expression) ExprList() []hcl.Expression {
	arr, isArray := e.src.(*arrayVal)
	if !isArray {
		return nil
	}
	exprs := make([]hcl.Expression, 0, len(arr.Values))
	for _, elem := range arr.Values {
		exprs = append(exprs, &expression{src: elem})
	}
	return exprs
}

View File

@ -741,3 +741,34 @@ func TestJustAttributes(t *testing.T) {
})
}
}
// TestExpressionAsTraversal verifies that a JSON string value is decoded
// into an absolute traversal of the expected length.
func TestExpressionAsTraversal(t *testing.T) {
	e := &expression{
		src: &stringVal{Value: "foo.bar[0]"},
	}

	traversal := e.AsTraversal()
	if got := len(traversal); got != 3 {
		t.Fatalf("incorrect traversal %#v; want length 3", traversal)
	}
}
func TestStaticExpressionList(t *testing.T) {
e := &expression{
src: &arrayVal{
Values: []node{
&stringVal{
Value: "hello",
},
},
},
}
exprs := e.ExprList()
if len(exprs) != 1 {
t.Fatalf("incorrect exprs %#v; want length 1", exprs)
}
if exprs[0].(*expression).src != e.src.(*arrayVal).Values[0] {
t.Fatalf("wrong first expression node")
}
}

View File

@ -60,6 +60,40 @@ func RangeBetween(start, end Range) Range {
}
}
// RangeOver returns a new range that covers both of the given ranges and
// possibly additional content between them if the two ranges do not overlap.
//
// If either range is empty then it is ignored. The result is empty if both
// given ranges are empty.
//
// The result is meaningless if the two ranges do not belong to the same
// source file.
func RangeOver(a, b Range) Range {
	// An empty range contributes nothing, so the other range wins outright.
	if a.Empty() {
		return b
	}
	if b.Empty() {
		return a
	}

	ret := Range{Filename: a.Filename}

	// Earliest start wins (ties resolve to b, as in byte-offset order).
	if a.Start.Byte < b.Start.Byte {
		ret.Start = a.Start
	} else {
		ret.Start = b.Start
	}
	// Latest end wins (ties resolve to b).
	if a.End.Byte > b.End.Byte {
		ret.End = a.End
	} else {
		ret.End = b.End
	}

	return ret
}
// ContainsOffset returns true if and only if the given byte offset is within
// the receiving Range.
func (r Range) ContainsOffset(offset int) bool {
@ -94,3 +128,135 @@ func (r Range) String() string {
)
}
}
// Empty returns true if the receiving range covers no characters, which
// is the case when its start and end byte offsets coincide.
func (r Range) Empty() bool {
	return r.Start.Byte == r.End.Byte
}
// CanSliceBytes returns true if SliceBytes could return an accurate
// sub-slice of the given slice.
//
// This effectively tests whether the start and end offsets of the range
// are within the bounds of the slice, and thus whether SliceBytes can be
// trusted to produce an accurate start and end position within that slice.
func (r Range) CanSliceBytes(b []byte) bool {
	if r.Start.Byte < 0 || r.Start.Byte > len(b) {
		return false
	}
	if r.End.Byte < 0 || r.End.Byte > len(b) {
		return false
	}
	// A range whose end precedes its start cannot be sliced either.
	return r.End.Byte >= r.Start.Byte
}
// SliceBytes returns a sub-slice of the given slice that is covered by the
// receiving range, assuming that the given slice is the source code of the
// file indicated by r.Filename.
//
// If the receiver refers to any byte offsets that are outside of the slice
// then the result is constrained to the overlapping portion only, to avoid
// a panic. Use CanSliceBytes to determine if the result is guaranteed to
// be an accurate span of the requested range.
func (r Range) SliceBytes(b []byte) []byte {
	// Clamp an offset into [0, len(b)] so the slice expression cannot panic.
	clamp := func(v int) int {
		if v < 0 {
			return 0
		}
		if v > len(b) {
			return len(b)
		}
		return v
	}

	start := clamp(r.Start.Byte)
	end := clamp(r.End.Byte)
	if end < start {
		end = start
	}
	return b[start:end]
}
// Overlaps returns true if the receiver and the other given range share any
// characters in common.
func (r Range) Overlaps(other Range) bool {
	switch {
	case r.Filename != other.Filename:
		// If the ranges are in different files then they can't possibly overlap
		return false
	case r.Empty() || other.Empty():
		// Empty ranges can never overlap
		return false
	case r.ContainsOffset(other.Start.Byte) || other.ContainsOffset(r.Start.Byte):
		// Two non-empty ranges share at least one character exactly when
		// either contains the other's start offset. The previous version
		// also tested ContainsOffset on the End offsets, which wrongly
		// reported two adjacent ranges (one ending exactly where the other
		// starts) as overlapping, since End marks the position *after* the
		// last character. (Assumes ContainsOffset treats End as exclusive,
		// consistent with Empty() meaning "zero characters" — confirm
		// against ContainsOffset's implementation.)
		return true
	default:
		return false
	}
}
// Overlap finds a range that is either identical to or a sub-range of both
// the receiver and the other given range. It returns an empty range
// within the receiver if there is no overlap between the two ranges.
//
// A non-empty result is either identical to or a subset of the receiver.
func (r Range) Overlap(other Range) Range {
	if !r.Overlaps(other) {
		// An empty range (Start == End) signals "no overlap".
		return Range{
			Filename: r.Filename,
			Start:    r.Start,
			End:      r.Start,
		}
	}

	ret := Range{Filename: r.Filename}

	// The overlap begins at the later of the two starts...
	if r.Start.Byte > other.Start.Byte {
		ret.Start = r.Start
	} else {
		ret.Start = other.Start
	}
	// ...and finishes at the earlier of the two ends.
	if r.End.Byte < other.End.Byte {
		ret.End = r.End
	} else {
		ret.End = other.End
	}

	return ret
}
// PartitionAround finds the portion of the given range that overlaps with
// the receiver and returns three ranges: the portion of the receiver that
// precedes the overlap, the overlap itself, and then the portion of the
// receiver that comes after the overlap.
//
// If the two ranges do not overlap then all three returned ranges are empty.
//
// If the given range aligns with or extends beyond either extent of the
// receiver then the corresponding outer range will be empty.
func (r Range) PartitionAround(other Range) (before, overlap, after Range) {
	overlap = r.Overlap(other)
	if overlap.Empty() {
		// No overlap at all: signal it by returning the same empty range
		// for all three parts.
		return overlap, overlap, overlap
	}
	// "before" spans from the receiver's start up to where the overlap
	// begins; "after" spans from where the overlap ends to the receiver's
	// end. Either may be empty if the overlap touches that extent.
	before = Range{
		Filename: r.Filename,
		Start:    r.Start,
		End:      overlap.Start,
	}
	after = Range{
		Filename: r.Filename,
		Start:    overlap.End,
		End:      r.End,
	}
	return before, overlap, after
}

148
vendor/github.com/hashicorp/hcl2/hcl/pos_scanner.go generated vendored Normal file
View File

@ -0,0 +1,148 @@
package hcl
import (
"bufio"
"bytes"
"github.com/apparentlymart/go-textseg/textseg"
)
// RangeScanner is a helper that will scan over a buffer using a bufio.SplitFunc
// and visit a source range for each token matched.
//
// For example, this can be used with bufio.ScanLines to find the source range
// for each line in the file, skipping over the actual newline characters, which
// may be useful when printing source code snippets as part of diagnostic
// messages.
//
// The line and column information in the returned ranges is produced by
// counting newline characters and grapheme clusters respectively, which
// mimics the behavior we expect from a parser when producing ranges.
type RangeScanner struct {
	filename string          // filename stamped onto every produced Range
	b        []byte          // the whole buffer being scanned
	cb       bufio.SplitFunc // tokenizer deciding where each token ends

	pos Pos    // position of next byte to process in b
	cur Range  // latest range
	tok []byte // slice of b that is covered by cur
	err error  // error from last scan, if any
}
// NewRangeScanner creates a new RangeScanner for the given buffer, producing
// ranges for the given filename.
//
// Since ranges have grapheme-cluster granularity rather than byte granularity,
// the scanner will produce incorrect results if the given SplitFunc creates
// tokens between grapheme cluster boundaries. In particular, it is incorrect
// to use RangeScanner with bufio.ScanRunes because it will produce tokens
// around individual UTF-8 sequences, which will split any multi-sequence
// grapheme clusters.
func NewRangeScanner(b []byte, filename string, cb bufio.SplitFunc) *RangeScanner {
	return &RangeScanner{
		filename: filename,
		b:        b,
		cb:       cb,
		// Scanning starts at the conventional beginning-of-file position.
		pos: Pos{
			Byte:   0,
			Line:   1,
			Column: 1,
		},
	}
}
// Scan advances to the next token in the buffer, returning true on success.
// After a true return, Range and Bytes describe the token just matched.
// It returns false at end of input or after an error (see Err).
func (sc *RangeScanner) Scan() bool {
	if sc.pos.Byte >= len(sc.b) || sc.err != nil {
		// All done
		return false
	}

	// Since we're operating on an in-memory buffer, we always pass the whole
	// remainder of the buffer to our SplitFunc and set isEOF to let it know
	// that it has the whole thing.
	advance, token, err := sc.cb(sc.b[sc.pos.Byte:], true)

	// Since we are setting isEOF to true this should never happen, but
	// if it does we will just abort and assume the SplitFunc is misbehaving.
	if advance == 0 && token == nil && err == nil {
		return false
	}

	if err != nil {
		// Record the error and leave an empty range at the failure position.
		sc.err = err
		sc.cur = Range{
			Filename: sc.filename,
			Start:    sc.pos,
			End:      sc.pos,
		}
		sc.tok = nil
		return false
	}

	sc.tok = token

	start := sc.pos
	end := sc.pos
	new := sc.pos

	// adv covers both the token itself and any subsequent bytes the
	// SplitFunc asked us to skip over (advance may exceed len(token),
	// e.g. when ScanLines consumes a trailing newline).
	adv := sc.b[sc.pos.Byte : sc.pos.Byte+advance]

	// We now need to scan over our token to count the grapheme clusters
	// so we can correctly advance Column, and count the newlines so we
	// can correctly advance Line.
	advR := bytes.NewReader(adv)
	gsc := bufio.NewScanner(advR)
	advanced := 0
	gsc.Split(textseg.ScanGraphemeClusters)
	for gsc.Scan() {
		gr := gsc.Bytes()
		new.Byte += len(gr)
		new.Column++

		// We rely here on the fact that \r\n is considered a grapheme cluster
		// and so we don't need to worry about miscounting additional lines
		// on files with Windows-style line endings.
		if len(gr) != 0 && (gr[0] == '\r' || gr[0] == '\n') {
			new.Column = 1
			new.Line++
		}

		if advanced < len(token) {
			// If we've not yet found the end of our token then we'll
			// also push our "end" marker along.
			// (if advance > len(token) then we'll stop moving "end" early
			// so that the caller only sees the range covered by token.)
			end = new
		}
		advanced += len(gr)
	}

	sc.cur = Range{
		Filename: sc.filename,
		Start:    start,
		End:      end,
	}
	sc.pos = new
	return true
}
// Range returns a range that covers the latest token obtained after a call
// to Scan returns true. It is overwritten by the next call to Scan.
func (sc *RangeScanner) Range() Range {
	return sc.cur
}
// Bytes returns the slice of the input buffer that is covered by the range
// that would be returned by Range.
func (sc *RangeScanner) Bytes() []byte {
	return sc.tok
}
// Err can be called after Scan returns false to determine if the latest read
// resulted in an error, and obtain that error if so.
func (sc *RangeScanner) Err() error {
	return sc.err
}

View File

@ -0,0 +1,193 @@
package hcl
import (
"bufio"
"reflect"
"testing"
"github.com/davecgh/go-spew/spew"
)
func TestPosScanner(t *testing.T) {
tests := map[string]struct {
Input string
Want []Range
WantToks [][]byte
}{
"empty": {
"",
[]Range{},
[][]byte{},
},
"single line": {
"hello",
[]Range{
{
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
[][]byte{
[]byte("hello"),
},
},
"single line with trailing UNIX newline": {
"hello\n",
[]Range{
{
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
[][]byte{
[]byte("hello"),
},
},
"single line with trailing Windows newline": {
"hello\r\n",
[]Range{
{
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
[][]byte{
[]byte("hello"),
},
},
"two lines with UNIX newline": {
"hello\nworld",
[]Range{
{
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
{
Start: Pos{Byte: 6, Line: 2, Column: 1},
End: Pos{Byte: 11, Line: 2, Column: 6},
},
},
[][]byte{
[]byte("hello"),
[]byte("world"),
},
},
"two lines with Windows newline": {
"hello\r\nworld",
[]Range{
{
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
{
Start: Pos{Byte: 7, Line: 2, Column: 1},
End: Pos{Byte: 12, Line: 2, Column: 6},
},
},
[][]byte{
[]byte("hello"),
[]byte("world"),
},
},
"blank line with UNIX newlines": {
"hello\n\nworld",
[]Range{
{
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
{
Start: Pos{Byte: 6, Line: 2, Column: 1},
End: Pos{Byte: 6, Line: 2, Column: 1},
},
{
Start: Pos{Byte: 7, Line: 3, Column: 1},
End: Pos{Byte: 12, Line: 3, Column: 6},
},
},
[][]byte{
[]byte("hello"),
[]byte(""),
[]byte("world"),
},
},
"blank line with Windows newlines": {
"hello\r\n\r\nworld",
[]Range{
{
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
{
Start: Pos{Byte: 7, Line: 2, Column: 1},
End: Pos{Byte: 7, Line: 2, Column: 1},
},
{
Start: Pos{Byte: 9, Line: 3, Column: 1},
End: Pos{Byte: 14, Line: 3, Column: 6},
},
},
[][]byte{
[]byte("hello"),
[]byte(""),
[]byte("world"),
},
},
"two lines with combiner and UNIX newline": {
"foo \U0001f469\U0001f3ff bar\nbaz",
[]Range{
{
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 16, Line: 1, Column: 10},
},
{
Start: Pos{Byte: 17, Line: 2, Column: 1},
End: Pos{Byte: 20, Line: 2, Column: 4},
},
},
[][]byte{
[]byte("foo \U0001f469\U0001f3ff bar"),
[]byte("baz"),
},
},
"two lines with combiner and Windows newline": {
"foo \U0001f469\U0001f3ff bar\r\nbaz",
[]Range{
{
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 16, Line: 1, Column: 10},
},
{
Start: Pos{Byte: 18, Line: 2, Column: 1},
End: Pos{Byte: 21, Line: 2, Column: 4},
},
},
[][]byte{
[]byte("foo \U0001f469\U0001f3ff bar"),
[]byte("baz"),
},
},
}
for name, test := range tests {
t.Run(name, func(t *testing.T) {
src := []byte(test.Input)
sc := NewRangeScanner(src, "", bufio.ScanLines)
got := make([]Range, 0)
gotToks := make([][]byte, 0)
for sc.Scan() {
got = append(got, sc.Range())
gotToks = append(gotToks, sc.Bytes())
}
if sc.Err() != nil {
t.Fatalf("unexpected error: %s", sc.Err())
}
if !reflect.DeepEqual(got, test.Want) {
t.Errorf("incorrect ranges\ngot: %swant: %s", spew.Sdump(got), spew.Sdump(test.Want))
}
if !reflect.DeepEqual(gotToks, test.WantToks) {
t.Errorf("incorrect tokens\ngot: %swant: %s", spew.Sdump(gotToks), spew.Sdump(test.WantToks))
}
})
}
}

467
vendor/github.com/hashicorp/hcl2/hcl/pos_test.go generated vendored Normal file
View File

@ -0,0 +1,467 @@
package hcl
import (
"bytes"
"fmt"
"reflect"
"testing"
)
func TestRangeOver(t *testing.T) {
tests := []struct {
A Range
B Range
Want Range
}{
{
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ####
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // #####
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ####
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 6, Line: 1, Column: 7},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // #####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 6, Line: 1, Column: 7},
},
},
{
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ###
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ###
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ####
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ##
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 2, Line: 1, Column: 3},
},
Range{ // ##
Start: Pos{Byte: 4, Line: 1, Column: 5},
End: Pos{Byte: 6, Line: 1, Column: 7},
},
Range{ // ######
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 6, Line: 1, Column: 7},
},
},
{
Range{ // ##
Start: Pos{Byte: 4, Line: 1, Column: 5},
End: Pos{Byte: 6, Line: 1, Column: 7},
},
Range{ // ##
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 2, Line: 1, Column: 3},
},
Range{ // ######
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 6, Line: 1, Column: 7},
},
},
}
for _, test := range tests {
t.Run(fmt.Sprintf("%s<=>%s", test.A, test.B), func(t *testing.T) {
got := RangeOver(test.A, test.B)
if !reflect.DeepEqual(got, test.Want) {
t.Errorf(
"wrong result\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s",
visRangeOffsets(test.A), test.A,
visRangeOffsets(test.B), test.B,
visRangeOffsets(got), got,
visRangeOffsets(test.Want), test.Want,
)
}
})
}
}
func TestPosOverlap(t *testing.T) {
tests := []struct {
A Range
B Range
Want Range
}{
{
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
},
{
Range{ // ####
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ###
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
},
{
Range{ // ####
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 6, Line: 1, Column: 7},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ###
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
},
{
Range{ // ###
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ###
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
},
{
Range{ // ###
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ###
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ####
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ##
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 2, Line: 1, Column: 3},
},
Range{ // ##
Start: Pos{Byte: 4, Line: 1, Column: 5},
End: Pos{Byte: 6, Line: 1, Column: 7},
},
Range{ // (no overlap)
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 0, Line: 1, Column: 1},
},
},
{
Range{ // ##
Start: Pos{Byte: 4, Line: 1, Column: 5},
End: Pos{Byte: 6, Line: 1, Column: 7},
},
Range{ // ##
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 2, Line: 1, Column: 3},
},
Range{ // (no overlap)
Start: Pos{Byte: 4, Line: 1, Column: 5},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
},
}
for _, test := range tests {
t.Run(fmt.Sprintf("%s<=>%s", test.A, test.B), func(t *testing.T) {
got := test.A.Overlap(test.B)
if !reflect.DeepEqual(got, test.Want) {
t.Errorf(
"wrong result\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s",
visRangeOffsets(test.A), test.A,
visRangeOffsets(test.B), test.B,
visRangeOffsets(got), got,
visRangeOffsets(test.Want), test.Want,
)
}
})
}
}
func TestRangePartitionAround(t *testing.T) {
tests := []struct {
Outer Range
Inner Range
WantBefore Range
WantOverlap Range
WantAfter Range
}{
{
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // (empty)
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 2, Line: 1, Column: 3},
},
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // (empty)
Start: Pos{Byte: 4, Line: 1, Column: 5},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
},
{
Range{ // ####
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // #
Start: Pos{Byte: 0, Line: 1, Column: 1},
End: Pos{Byte: 1, Line: 1, Column: 2},
},
Range{ // ###
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // (empty)
Start: Pos{Byte: 4, Line: 1, Column: 5},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
},
{
Range{ // ####
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // (empty)
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 2, Line: 1, Column: 3},
},
Range{ // ###
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // #
Start: Pos{Byte: 5, Line: 1, Column: 6},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
{
Range{ // ####
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // #
Start: Pos{Byte: 1, Line: 1, Column: 2},
End: Pos{Byte: 2, Line: 1, Column: 3},
},
Range{ // ##
Start: Pos{Byte: 2, Line: 1, Column: 3},
End: Pos{Byte: 4, Line: 1, Column: 5},
},
Range{ // #
Start: Pos{Byte: 4, Line: 1, Column: 5},
End: Pos{Byte: 5, Line: 1, Column: 6},
},
},
}
for _, test := range tests {
t.Run(fmt.Sprintf("%s around %s", test.Outer, test.Inner), func(t *testing.T) {
gotBefore, gotOverlap, gotAfter := test.Outer.PartitionAround(test.Inner)
if !reflect.DeepEqual(gotBefore, test.WantBefore) {
t.Errorf(
"wrong before\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s",
visRangeOffsets(test.Outer), test.Outer,
visRangeOffsets(test.Inner), test.Inner,
visRangeOffsets(gotBefore), gotBefore,
visRangeOffsets(test.WantBefore), test.WantBefore,
)
}
if !reflect.DeepEqual(gotOverlap, test.WantOverlap) {
t.Errorf(
"wrong overlap\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s",
visRangeOffsets(test.Outer), test.Outer,
visRangeOffsets(test.Inner), test.Inner,
visRangeOffsets(gotOverlap), gotOverlap,
visRangeOffsets(test.WantOverlap), test.WantOverlap,
)
}
if !reflect.DeepEqual(gotAfter, test.WantAfter) {
t.Errorf(
"wrong after\nA : %-10s %s\nB : %-10s %s\ngot : %-10s %s\nwant: %-10s %s",
visRangeOffsets(test.Outer), test.Outer,
visRangeOffsets(test.Inner), test.Inner,
visRangeOffsets(gotAfter), gotAfter,
visRangeOffsets(test.WantAfter), test.WantAfter,
)
}
})
}
}
// visRangeOffsets is a helper that produces a visual representation of the
// start and end byte offsets of the given range, which can then be stacked
// with the same for other ranges to more easily see how the ranges relate
// to one another.
func visRangeOffsets(rng Range) string {
	start := rng.Start.Byte
	end := rng.End.Byte

	if end < start {
		// Should never happen, but we'll visualize it anyway (as a run of
		// '!' marks) so we can more easily debug failing tests.
		var buf bytes.Buffer
		buf.Write(bytes.Repeat([]byte{' '}, end))
		buf.Write(bytes.Repeat([]byte{'!'}, start-end))
		return buf.String()
	}

	// Normal case: pad with spaces up to the start offset, then mark the
	// covered bytes with '#'.
	var buf bytes.Buffer
	buf.Write(bytes.Repeat([]byte{' '}, start))
	buf.Write(bytes.Repeat([]byte{'#'}, end-start))
	return buf.String()
}

View File

@ -156,6 +156,17 @@ func (t Traversal) RootName() string {
return t[0].(TraverseRoot).Name
}
// SourceRange returns the source range for the traversal, spanning from the
// start of its first step to the end of its last step.
func (t Traversal) SourceRange() Range {
	if len(t) > 0 {
		return RangeBetween(t[0].SourceRange(), t[len(t)-1].SourceRange())
	}

	// An empty traversal has no meaningful range; return the zero value,
	// which is at least correctly-typed.
	return Range{}
}
// TraversalSplit represents a pair of traversals, the first of which is
// an absolute traversal and the second of which is relative to the first.
//
@ -206,6 +217,7 @@ func (t TraversalSplit) RootName() string {
// A Traverser is a step within a Traversal.
type Traverser interface {
TraversalStep(cty.Value) (cty.Value, Diagnostics)
SourceRange() Range
isTraverserSigil() isTraverser
}
@ -231,6 +243,10 @@ func (tn TraverseRoot) TraversalStep(cty.Value) (cty.Value, Diagnostics) {
panic("Cannot traverse an absolute traversal")
}
// SourceRange returns the source range this step was parsed from, as
// recorded in SrcRange. It implements part of the Traverser interface.
func (tn TraverseRoot) SourceRange() Range {
	return tn.SrcRange
}
// TraverseAttr looks up an attribute in its initial value.
type TraverseAttr struct {
isTraverser
@ -301,6 +317,10 @@ func (tn TraverseAttr) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
}
}
// SourceRange returns the source range this step was parsed from, as
// recorded in SrcRange. It implements part of the Traverser interface.
func (tn TraverseAttr) SourceRange() Range {
	return tn.SrcRange
}
// TraverseIndex applies the index operation to its initial value.
type TraverseIndex struct {
isTraverser
@ -312,6 +332,10 @@ func (tn TraverseIndex) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
return Index(val, tn.Key, &tn.SrcRange)
}
// SourceRange returns the source range this step was parsed from, as
// recorded in SrcRange. It implements part of the Traverser interface.
func (tn TraverseIndex) SourceRange() Range {
	return tn.SrcRange
}
// TraverseSplat applies the splat operation to its initial value.
type TraverseSplat struct {
isTraverser
@ -322,3 +346,7 @@ type TraverseSplat struct {
func (tn TraverseSplat) TraversalStep(val cty.Value) (cty.Value, Diagnostics) {
panic("TraverseSplat not yet implemented")
}
// SourceRange returns the source range this step was parsed from, as
// recorded in SrcRange. It implements part of the Traverser interface.
func (tn TraverseSplat) SourceRange() Range {
	return tn.SrcRange
}

View File

@ -0,0 +1,55 @@
package hcl
// AbsTraversalForExpr attempts to interpret the given expression as
// an absolute traversal, or returns error diagnostic(s) if that is
// not possible for the given expression.
//
// A particular Expression implementation can support this function by
// offering a method called AsTraversal that takes no arguments and
// returns either a valid absolute traversal or nil to indicate that
// no traversal is possible.
//
// In most cases the calling application is interested in the value
// that results from an expression, but in rarer cases the application
// needs to see the name of the variable and subsequent
// attributes/indexes itself, for example to allow users to give references
// to the variables themselves rather than to their values. An implementer
// of this function should at least support attribute and index steps.
func AbsTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
	type asTraversal interface {
		AsTraversal() Traversal
	}

	// The expression opts in by implementing AsTraversal; a nil result
	// means it declined to produce a traversal for its particular content.
	if asT, supported := expr.(asTraversal); supported {
		if traversal := asT.AsTraversal(); traversal != nil {
			return traversal, nil
		}
	}
	return nil, Diagnostics{
		&Diagnostic{
			Severity: DiagError,
			Summary:  "Invalid expression",
			Detail:   "A static variable reference is required.",
			Subject:  expr.Range().Ptr(),
		},
	}
}
// RelTraversalForExpr is similar to AbsTraversalForExpr but it returns
// a relative traversal instead: the leading TraverseRoot produced by
// AbsTraversalForExpr is rewritten into an equivalent TraverseAttr, so the
// first element of a non-empty result is always a TraverseAttr, followed
// by zero or more other steps.
//
// Any expression accepted by AbsTraversalForExpr is also accepted by
// RelTraversalForExpr.
func RelTraversalForExpr(expr Expression) (Traversal, Diagnostics) {
	traversal, diags := AbsTraversalForExpr(expr)
	if len(traversal) == 0 {
		return traversal, diags
	}

	root := traversal[0].(TraverseRoot)
	traversal[0] = TraverseAttr{
		Name:     root.Name,
		SrcRange: root.SrcRange,
	}
	return traversal, diags
}

View File

@ -0,0 +1,128 @@
package hcl
import (
"testing"
)
// asTraversalSupported is a test expression that implements AsTraversal,
// producing a single-step traversal rooted at RootName.
type asTraversalSupported struct {
	staticExpr
	RootName string
}

// asTraversalNotSupported is a test expression that does not implement
// AsTraversal at all.
type asTraversalNotSupported struct {
	staticExpr
}

// asTraversalDeclined is a test expression that implements AsTraversal but
// declines to produce a traversal by returning nil.
type asTraversalDeclined struct {
	staticExpr
}
// AsTraversal returns a single-step absolute traversal whose root name is
// the expression's RootName field.
func (e asTraversalSupported) AsTraversal() Traversal {
	return Traversal{
		TraverseRoot{
			Name: e.RootName,
		},
	}
}
// AsTraversal always returns nil, signaling that no traversal is possible
// even though the interface is implemented.
func (e asTraversalDeclined) AsTraversal() Traversal {
	return nil
}
// TestAbsTraversalForExpr verifies that AbsTraversalForExpr produces a
// TraverseRoot for supporting expressions and error diagnostics otherwise.
func TestAbsTraversalForExpr(t *testing.T) {
	// An empty WantRootName means we expect an error instead of a traversal.
	tests := []struct {
		Expr         Expression
		WantRootName string
	}{
		{
			asTraversalSupported{RootName: "foo"},
			"foo",
		},
		{
			asTraversalNotSupported{},
			"",
		},
		{
			asTraversalDeclined{},
			"",
		},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			traversal, diags := AbsTraversalForExpr(test.Expr)
			if traversal == nil {
				if !diags.HasErrors() {
					t.Errorf("returned nil traversal without error diagnostics")
				}
				if test.WantRootName != "" {
					t.Errorf("traversal was not returned; want TraverseRoot(%q)", test.WantRootName)
				}
				return
			}

			if test.WantRootName == "" {
				t.Fatalf("traversal was returned; want error")
			}
			if len(traversal) != 1 {
				t.Fatalf("wrong traversal length %d; want 1", len(traversal))
			}
			root, ok := traversal[0].(TraverseRoot)
			if !ok {
				t.Fatalf("first traversal step is %T; want hcl.TraverseRoot", traversal[0])
			}
			if root.Name != test.WantRootName {
				t.Errorf("wrong root name %q; want %q", root.Name, test.WantRootName)
			}
		})
	}
}
// TestRelTraversalForExpr verifies that RelTraversalForExpr produces a
// TraverseAttr as the first step for supporting expressions and error
// diagnostics otherwise.
func TestRelTraversalForExpr(t *testing.T) {
	// An empty WantFirstName means we expect an error instead of a traversal.
	tests := []struct {
		Expr          Expression
		WantFirstName string
	}{
		{
			asTraversalSupported{RootName: "foo"},
			"foo",
		},
		{
			asTraversalNotSupported{},
			"",
		},
		{
			asTraversalDeclined{},
			"",
		},
	}

	for _, test := range tests {
		t.Run("", func(t *testing.T) {
			traversal, diags := RelTraversalForExpr(test.Expr)
			if traversal == nil {
				if !diags.HasErrors() {
					t.Errorf("returned nil traversal without error diagnostics")
				}
				if test.WantFirstName != "" {
					t.Errorf("traversal was not returned; want TraverseAttr(%q)", test.WantFirstName)
				}
				return
			}

			if test.WantFirstName == "" {
				t.Fatalf("traversal was returned; want error")
			}
			if len(traversal) != 1 {
				t.Fatalf("wrong traversal length %d; want 1", len(traversal))
			}
			first, ok := traversal[0].(TraverseAttr)
			if !ok {
				t.Fatalf("first traversal step is %T; want hcl.TraverseAttr", traversal[0])
			}
			if first.Name != test.WantFirstName {
				t.Errorf("wrong root name %q; want %q", first.Name, test.WantFirstName)
			}
		})
	}
}

View File

@ -1,3 +1,37 @@
## 0.11.2 (January 9, 2018)
BACKWARDS INCOMPATIBILITIES / NOTES:
* backend/gcs: The gcs remote state backend was erroneously creating the state bucket if it didn't exist. This is not the intended behavior of backends, as Terraform cannot track or manage that resource. The target bucket must now be created separately, before using it with Terraform. ([#16865](https://github.com/hashicorp/terraform/issues/16865))
NEW FEATURES:
* **[Habitat](https://www.habitat.sh/) Provisioner** allowing automatic installation of the Habitat agent ([#16280](https://github.com/hashicorp/terraform/issues/16280))
IMPROVEMENTS:
* core: removed duplicate prompts and clarified wording when migrating backend configurations ([#16939](https://github.com/hashicorp/terraform/issues/16939))
* config: new `rsadecrypt` interpolation function allows decrypting a base64-encoded ciphertext using a given private key. This is particularly useful for decrypting the password for a Windows instance on AWS EC2, but is generic and may find other uses too. ([#16647](https://github.com/hashicorp/terraform/issues/16647))
* config: new `timeadd` interpolation function allows calculating a new timestamp relative to an existing known timestamp. ([#16644](https://github.com/hashicorp/terraform/issues/16644))
* cli: Passing an empty string to `-plugin-dir` during init will remove previously saved paths ([#16969](https://github.com/hashicorp/terraform/issues/16969))
* cli: Module and provider installation (and some other Terraform features) now implement [RFC6555](https://tools.ietf.org/html/rfc6555) when making outgoing HTTP requests, which should improve installation reliability for dual-stack (both IPv4 and IPv6) hosts running on networks that have non-performant or broken IPv6 Internet connectivity by trying both IPv4 and IPv6 connections. ([#16805](https://github.com/hashicorp/terraform/issues/16805))
* backend/s3: it is now possible to disable the region check, for improved compatibility with third-party services that attempt to mimic the S3 API. ([#16757](https://github.com/hashicorp/terraform/issues/16757))
* backend/s3: it is now possible to force the path-based S3 API form, for improved compatibility with third-party services that attempt to mimic the S3 API. ([#17001](https://github.com/hashicorp/terraform/issues/17001))
* backend/s3: it is now possible to use named credentials from the `~/.aws/credentials` file, similarly to the AWS plugin ([#16661](https://github.com/hashicorp/terraform/issues/16661))
* backend/manta: support for Triton RBAC ([#17003](https://github.com/hashicorp/terraform/issues/17003))
* backend/gcs: support for customer-supplied encryption keys for remote state buckets ([#16936](https://github.com/hashicorp/terraform/issues/16936))
* provider/terraform: in `terraform_remote_state`, the argument `environment` is now deprecated in favor of `workspace`. The `environment` argument will be removed in a later Terraform release. ([#16558](https://github.com/hashicorp/terraform/issues/16558))
BUG FIXES:
* config: fixed crash in `substr` interpolation function with invalid offset ([#17043](https://github.com/hashicorp/terraform/issues/17043))
* config: Referencing a count attribute in an output no longer generates a warning ([#16866](https://github.com/hashicorp/terraform/issues/16866))
* cli: Terraform will no longer crash when `terraform plan`, `terraform apply`, and some other commands encounter an invalid provider version constraint in configuration, generating a proper error message instead. ([#16867](https://github.com/hashicorp/terraform/issues/16867))
* backend/gcs: The usage of the GOOGLE_CREDENTIALS environment variable now matches that of the google provider ([#16865](https://github.com/hashicorp/terraform/issues/16865))
* backend/gcs: fixed the locking methodology to avoid "double-locking" issues when used with the `terraform_remote_state` data source ([#16852](https://github.com/hashicorp/terraform/issues/16852))
* backend/s3: the `workspace_key_prefix` can now be an empty string or contain slashes ([#16932](https://github.com/hashicorp/terraform/issues/16932))
* provisioner/salt-masterless: now waits for all of the remote operations to complete before returning ([#16704](https://github.com/hashicorp/terraform/issues/16704))
## 0.11.1 (November 30, 2017)
IMPROVEMENTS:

View File

@ -63,6 +63,8 @@ func initCommands(config *Config) {
RunningInAutomation: inAutomation,
PluginCacheDir: config.PluginCacheDir,
OverrideDataDir: dataDir,
ShutdownCh: makeShutdownCh(),
}
// The command list is included in the terraform -help
@ -81,14 +83,12 @@ func initCommands(config *Config) {
"apply": func() (cli.Command, error) {
return &command.ApplyCommand{
Meta: meta,
ShutdownCh: makeShutdownCh(),
}, nil
},
"console": func() (cli.Command, error) {
return &command.ConsoleCommand{
Meta: meta,
ShutdownCh: makeShutdownCh(),
}, nil
},
@ -96,7 +96,6 @@ func initCommands(config *Config) {
return &command.ApplyCommand{
Meta: meta,
Destroy: true,
ShutdownCh: makeShutdownCh(),
}, nil
},

View File

@ -8,6 +8,7 @@ import (
"strconv"
"strings"
hcl2 "github.com/hashicorp/hcl2/hcl"
"github.com/hashicorp/hil/ast"
"github.com/hashicorp/terraform/helper/hilmapstructure"
"github.com/hashicorp/terraform/plugin/discovery"
@ -415,10 +416,17 @@ func (c *Config) Validate() tfdiags.Diagnostics {
if p.Version != "" {
_, err := discovery.ConstraintStr(p.Version).Parse()
if err != nil {
diags = diags.Append(fmt.Errorf(
"provider.%s: invalid version constraint %q: %s",
name, p.Version, err,
))
diags = diags.Append(&hcl2.Diagnostic{
Severity: hcl2.DiagError,
Summary: "Invalid provider version constraint",
Detail: fmt.Sprintf(
"The value %q given for provider.%s is not a valid version constraint.",
p.Version, name,
),
// TODO: include a "Subject" source reference in here,
// once the config loader is able to retain source
// location information.
})
}
}
@ -849,7 +857,7 @@ func (c *Config) Validate() tfdiags.Diagnostics {
// a count might dynamically be set to something
// other than 1 and thus splat syntax is still needed
// to be safe.
if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" {
if r.RawCount != nil && r.RawCount.Raw != nil && r.RawCount.Raw["count"] != "1" && rv.Field != "count" {
diags = diags.Append(tfdiags.SimpleWarning(fmt.Sprintf(
"output %q: must use splat syntax to access %s attribute %q, because it has \"count\" set; use %s.*.%s to obtain a list of the attributes across all instances",
o.Name,

View File

@ -217,7 +217,7 @@ func TestConfigValidate_table(t *testing.T) {
"provider with invalid version constraint",
"provider-version-invalid",
true,
"invalid version constraint",
"not a valid version constraint",
},
{
"invalid provider name in module block",

View File

@ -2,7 +2,7 @@
package configschema
import "fmt"
import "strconv"
const _NestingMode_name = "nestingModeInvalidNestingSingleNestingListNestingSetNestingMap"
@ -10,7 +10,7 @@ var _NestingMode_index = [...]uint8{0, 18, 31, 42, 52, 62}
func (i NestingMode) String() string {
if i < 0 || i >= NestingMode(len(_NestingMode_index)-1) {
return fmt.Sprintf("NestingMode(%d)", i)
return "NestingMode(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _NestingMode_name[_NestingMode_index[i]:_NestingMode_index[i+1]]
}

View File

@ -4,12 +4,15 @@ import (
"bytes"
"compress/gzip"
"crypto/md5"
"crypto/rsa"
"crypto/sha1"
"crypto/sha256"
"crypto/sha512"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/pem"
"fmt"
"io/ioutil"
"math"
@ -103,6 +106,7 @@ func Funcs() map[string]ast.Function {
"pow": interpolationFuncPow(),
"uuid": interpolationFuncUUID(),
"replace": interpolationFuncReplace(),
"rsadecrypt": interpolationFuncRsaDecrypt(),
"sha1": interpolationFuncSha1(),
"sha256": interpolationFuncSha256(),
"sha512": interpolationFuncSha512(),
@ -112,6 +116,7 @@ func Funcs() map[string]ast.Function {
"split": interpolationFuncSplit(),
"substr": interpolationFuncSubstr(),
"timestamp": interpolationFuncTimestamp(),
"timeadd": interpolationFuncTimeAdd(),
"title": interpolationFuncTitle(),
"transpose": interpolationFuncTranspose(),
"trimspace": interpolationFuncTrimSpace(),
@ -1504,6 +1509,29 @@ func interpolationFuncTimestamp() ast.Function {
}
}
// interpolationFuncTimeAdd implements the "timeadd" function, which adds a
// duration (parsable by time.ParseDuration; negative values subtract) to an
// RFC3339 timestamp and returns the result formatted as RFC3339.
func interpolationFuncTimeAdd() ast.Function {
	return ast.Function{
		ArgTypes: []ast.Type{
			ast.TypeString, // input timestamp string in RFC3339 format
			ast.TypeString, // duration to add to input timestamp that should be parsable by time.ParseDuration
		},
		ReturnType: ast.TypeString,
		Callback: func(args []interface{}) (interface{}, error) {
			ts, err := time.Parse(time.RFC3339, args[0].(string))
			if err != nil {
				return nil, err
			}
			duration, err := time.ParseDuration(args[1].(string))
			if err != nil {
				return nil, err
			}

			return ts.Add(duration).Format(time.RFC3339), nil
		},
	}
}
// interpolationFuncTitle implements the "title" function that returns a copy of the
// string in which first characters of all the words are capitalized.
func interpolationFuncTitle() ast.Function {
@ -1549,7 +1577,7 @@ func interpolationFuncSubstr() ast.Function {
return nil, fmt.Errorf("length should be a non-negative integer")
}
if offset > len(str) {
if offset > len(str) || offset < 0 {
return nil, fmt.Errorf("offset cannot be larger than the length of the string")
}
@ -1657,3 +1685,43 @@ func interpolationFuncAbs() ast.Function {
},
}
}
// interpolationFuncRsaDecrypt implements the "rsadecrypt" function that does
// RSA decryption: the first argument is a base64-encoded PKCS#1 v1.5
// ciphertext, the second is a PEM-encoded PKCS#1 RSA private key, and the
// result is the decrypted plaintext as a string.
func interpolationFuncRsaDecrypt() ast.Function {
	return ast.Function{
		ArgTypes:   []ast.Type{ast.TypeString, ast.TypeString},
		ReturnType: ast.TypeString,
		Callback: func(args []interface{}) (interface{}, error) {
			s := args[0].(string)
			key := args[1].(string)

			b, err := base64.StdEncoding.DecodeString(s)
			if err != nil {
				// Report the ciphertext that failed to decode; previously
				// this message mistakenly echoed the key instead.
				return "", fmt.Errorf("Failed to decode input %q: cipher text must be base64-encoded", s)
			}

			// NOTE(review): the key-related errors below echo the full key
			// material back to the user; consider redacting if these keys
			// may be sensitive.
			block, _ := pem.Decode([]byte(key))
			if block == nil {
				return "", fmt.Errorf("Failed to read key %q: no key found", key)
			}
			if block.Headers["Proc-Type"] == "4,ENCRYPTED" {
				return "", fmt.Errorf(
					"Failed to read key %q: password protected keys are\n"+
						"not supported. Please decrypt the key prior to use.", key)
			}

			x509Key, err := x509.ParsePKCS1PrivateKey(block.Bytes)
			if err != nil {
				return "", err
			}

			// A nil random source is accepted by DecryptPKCS1v15; randomness
			// is only used for blinding on some operations.
			out, err := rsa.DecryptPKCS1v15(nil, x509Key, b)
			if err != nil {
				return "", err
			}

			return string(out), nil
		},
	}
}

View File

@ -2426,6 +2426,38 @@ func TestInterpolateFuncTimestamp(t *testing.T) {
}
}
// TestInterpolateFuncTimeAdd exercises the "timeadd" interpolation function:
// simple addition, compound durations, subtraction via a negative duration,
// and error cases for malformed timestamps and durations.
func TestInterpolateFuncTimeAdd(t *testing.T) {
	testFunction(t, testFunctionConfig{
		Cases: []testFunctionCase{
			{
				`${timeadd("2017-11-22T00:00:00Z", "1s")}`,
				"2017-11-22T00:00:01Z",
				false,
			},
			{
				`${timeadd("2017-11-22T00:00:00Z", "10m1s")}`,
				"2017-11-22T00:10:01Z",
				false,
			},
			{ // also support subtraction
				`${timeadd("2017-11-22T00:00:00Z", "-1h")}`,
				"2017-11-21T23:00:00Z",
				false,
			},
			{ // Invalid format timestamp
				`${timeadd("2017-11-22", "-1h")}`,
				nil,
				true,
			},
			{ // Invalid format duration (day is not supported by ParseDuration)
				`${timeadd("2017-11-22T00:00:00Z", "1d")}`,
				nil,
				true,
			},
		},
	})
}
type testFunctionConfig struct {
Cases []testFunctionCase
Vars map[string]ast.Variable
@ -2536,6 +2568,11 @@ func TestInterpolateFuncSubstr(t *testing.T) {
nil,
true,
},
{
`${substr("foo", -4, -1)}`,
nil,
true,
},
// invalid length
{
@ -2780,3 +2817,146 @@ func TestInterpolateFuncAbs(t *testing.T) {
},
})
}
// TestInterpolateFuncRsaDecrypt exercises the "rsadecrypt" interpolation
// function: a ciphertext that decrypts with the matching key, plus error
// cases for wrong/bad/empty keys, bad/empty ciphertexts, and wrong arity.
// The key material below is test-only fixture data.
func TestInterpolateFuncRsaDecrypt(t *testing.T) {
	testFunction(t, testFunctionConfig{
		Vars: map[string]ast.Variable{
			// Ciphertext of the plaintext "message", encrypted with
			// var.private_key and base64-encoded.
			"var.cipher_base64": ast.Variable{
				Type:  ast.TypeString,
				Value: "eczGaDhXDbOFRZGhjx2etVzWbRqWDlmq0bvNt284JHVbwCgObiuyX9uV0LSAMY707IEgMkExJqXmsB4OWKxvB7epRB9G/3+F+pcrQpODlDuL9oDUAsa65zEpYF0Wbn7Oh7nrMQncyUPpyr9WUlALl0gRWytOA23S+y5joa4M34KFpawFgoqTu/2EEH4Xl1zo+0fy73fEto+nfkUY+meuyGZ1nUx/+DljP7ZqxHBFSlLODmtuTMdswUbHbXbWneW51D7Jm7xB8nSdiA2JQNK5+Sg5x8aNfgvFTt/m2w2+qpsyFa5Wjeu6fZmXSl840CA07aXbk9vN4I81WmJyblD/ZA==",
			},
			// The private key matching var.cipher_base64.
			"var.private_key": ast.Variable{
				Type: ast.TypeString,
				Value: `
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAgUElV5mwqkloIrM8ZNZ72gSCcnSJt7+/Usa5G+D15YQUAdf9
c1zEekTfHgDP+04nw/uFNFaE5v1RbHaPxhZYVg5ZErNCa/hzn+x10xzcepeS3KPV
Xcxae4MR0BEegvqZqJzN9loXsNL/c3H/B+2Gle3hTxjlWFb3F5qLgR+4Mf4ruhER
1v6eHQa/nchi03MBpT4UeJ7MrL92hTJYLdpSyCqmr8yjxkKJDVC2uRrr+sTSxfh7
r6v24u/vp/QTmBIAlNPgadVAZw17iNNb7vjV7Gwl/5gHXonCUKURaV++dBNLrHIZ
pqcAM8wHRph8mD1EfL9hsz77pHewxolBATV+7QIDAQABAoIBAC1rK+kFW3vrAYm3
+8/fQnQQw5nec4o6+crng6JVQXLeH32qXShNf8kLLG/Jj0vaYcTPPDZw9JCKkTMQ
0mKj9XR/5DLbBMsV6eNXXuvJJ3x4iKW5eD9WkLD4FKlNarBRyO7j8sfPTqXW7uat
NxWdFH7YsSRvNh/9pyQHLWA5OituidMrYbc3EUx8B1GPNyJ9W8Q8znNYLfwYOjU4
Wv1SLE6qGQQH9Q0WzA2WUf8jklCYyMYTIywAjGb8kbAJlKhmj2t2Igjmqtwt1PYc
pGlqbtQBDUiWXt5S4YX/1maIQ/49yeNUajjpbJiH3DbhJbHwFTzP3pZ9P9GHOzlG
kYR+wSECgYEAw/Xida8kSv8n86V3qSY/I+fYQ5V+jDtXIE+JhRnS8xzbOzz3v0WS
Oo5H+o4nJx5eL3Ghb3Gcm0Jn46dHrxinHbm+3RjXv/X6tlbxIYjRSQfHOTSMCTvd
qcliF5vC6RCLXuc7R+IWR1Ky6eDEZGtrvt3DyeYABsp9fRUFR/6NluUCgYEAqNsw
1aSl7WJa27F0DoJdlU9LWerpXcazlJcIdOz/S9QDmSK3RDQTdqfTxRmrxiYI9LEs
mkOkvzlnnOBMpnZ3ZOU5qIRfprecRIi37KDAOHWGnlC0EWGgl46YLb7/jXiWf0AG
Y+DfJJNd9i6TbIDWu8254/erAS6bKMhW/3q7f2kCgYAZ7Id/BiKJAWRpqTRBXlvw
BhXoKvjI2HjYP21z/EyZ+PFPzur/lNaZhIUlMnUfibbwE9pFggQzzf8scM7c7Sf+
mLoVSdoQ/Rujz7CqvQzi2nKSsM7t0curUIb3lJWee5/UeEaxZcmIufoNUrzohAWH
BJOIPDM4ssUTLRq7wYM9uQKBgHCBau5OP8gE6mjKuXsZXWUoahpFLKwwwmJUp2vQ
pOFPJ/6WZOlqkTVT6QPAcPUbTohKrF80hsZqZyDdSfT3peFx4ZLocBrS56m6NmHR
UYHMvJ8rQm76T1fryHVidz85g3zRmfBeWg8yqT5oFg4LYgfLsPm1gRjOhs8LfPvI
OLlRAoGBAIZ5Uv4Z3s8O7WKXXUe/lq6j7vfiVkR1NW/Z/WLKXZpnmvJ7FgxN4e56
RXT7GwNQHIY8eDjDnsHxzrxd+raOxOZeKcMHj3XyjCX3NHfTscnsBPAGYpY/Wxzh
T8UYnFu6RzkixElTf2rseEav7rkdKkI3LAeIZy7B0HulKKsmqVQ7
-----END RSA PRIVATE KEY-----
`,
			},
			// A valid key that does NOT match the ciphertext, used to
			// verify that decryption with the wrong key fails.
			"var.wrong_private_key": ast.Variable{
				Type: ast.TypeString,
				Value: `
-----BEGIN RSA PRIVATE KEY-----
MIIEowIBAAKCAQEAlrCgnEVgmNKCq7KPc+zUU5IrxPu1ClMNJS7RTsTPEkbwe5SB
p+6V6WtCbD/X/lDRRGbOENChh1Phulb7lViqgrdpHydgsrKoS5ah3DfSIxLFLE00
9Yo4TCYwgw6+s59j16ZAFVinaQ9l6Kmrb2ll136hMrz8QKh+qw+onOLd38WFgm+W
ZtUqSXf2LANzfzzy4OWFNyFqKaCAolSkPdTS9Nz+svtScvp002DQp8OdP1AgPO+l
o5N3M38Fftapwg0pCtJ5Zq0NRWIXEonXiTEMA6zy3gEZVOmDxoIFUWnmrqlMJLFy
5S6LDrHSdqJhCxDK6WRZj43X9j8spktk3eGhMwIDAQABAoIBAAem8ID/BOi9x+Tw
LFi2rhGQWqimH4tmrEQ3HGnjlKBY+d1MrUjZ1MMFr1nP5CgF8pqGnfA8p/c3Sz8r
K5tp5T6+EZiDZ2WrrOApxg5ox0MAsQKO6SGO40z6o3wEQ6rbbTaGOrraxaWQIpyu
AQanU4Sd6ZGqByVBaS1GnklZO+shCHqw73b7g1cpLEmFzcYnKHYHlUUIsstMe8E1
BaCY0CH7JbWBjcbiTnBVwIRZuu+EjGiQuhTilYL2OWqoMVg1WU0L2IFpR8lkf/2W
SBx5J6xhwbBGASOpM+qidiN580GdPzGhWYSqKGroHEzBm6xPSmV1tadNA26WFG4p
pthLiAECgYEA5BsPRpNYJAQLu5B0N7mj9eEp0HABVEgL/MpwiImjaKdAwp78HM64
IuPvJxs7r+xESiIz4JyjR8zrQjYOCKJsARYkmNlEuAz0SkHabCw1BdEBwUhjUGVB
efoERK6GxfAoNqmSDwsOvHFOtsmDIlbHmg7G2rUxNVpeou415BSB0B8CgYEAqR4J
YHKk2Ibr9rU+rBU33TcdTGw0aAkFNAVeqM9j0haWuFXmV3RArgoy09lH+2Ha6z/g
fTX2xSDAWV7QUlLOlBRIhurPAo2jO2yCrGHPZcWiugstrR2hTTInigaSnCmK3i7F
6sYmL3S7K01IcVNxSlWvGijtClT92Cl2WUCTfG0CgYAiEjyk4QtQTd5mxLvnOu5X
oqs5PBGmwiAwQRiv/EcRMbJFn7Oupd3xMDSflbzDmTnWDOfMy/jDl8MoH6TW+1PA
kcsjnYhbKWwvz0hN0giVdtOZSDO1ZXpzOrn6fEsbM7T9/TQY1SD9WrtUKCNTNL0Z
sM1ZC6lu+7GZCpW4HKwLJwKBgQCRT0yxQXBg1/UxwuO5ynV4rx2Oh76z0WRWIXMH
S0MyxdP1SWGkrS/SGtM3cg/GcHtA/V6vV0nUcWK0p6IJyjrTw2XZ/zGluPuTWJYi
9dvVT26Vunshrz7kbH7KuwEICy3V4IyQQHeY+QzFlR70uMS0IVFWAepCoWqHbIDT
CYhwNQKBgGPcLXmjpGtkZvggl0aZr9LsvCTckllSCFSI861kivL/rijdNoCHGxZv
dfDkLTLcz9Gk41rD9Gxn/3sqodnTAc3Z2PxFnzg1Q/u3+x6YAgBwI/g/jE2xutGW
H7CurtMwALQ/n/6LUKFmjRZjqbKX9SO2QSaC3grd6sY9Tu+bZjLe
-----END RSA PRIVATE KEY-----
`,
			},
		},
		Cases: []testFunctionCase{
			// Base-64 encoded cipher decrypts correctly
			{
				`${rsadecrypt(var.cipher_base64, var.private_key)}`,
				"message",
				false,
			},
			// Raw cipher
			{
				`${rsadecrypt(base64decode(var.cipher_base64), var.private_key)}`,
				nil,
				true,
			},
			// Wrong key
			{
				`${rsadecrypt(var.cipher_base64, var.wrong_private_key)}`,
				nil,
				true,
			},
			// Bad key
			{
				`${rsadecrypt(var.cipher_base64, "bad key")}`,
				nil,
				true,
			},
			// Empty key
			{
				`${rsadecrypt(var.cipher_base64, "")}`,
				nil,
				true,
			},
			// Bad cipher
			{
				`${rsadecrypt("bad cipher", var.private_key)}`,
				nil,
				true,
			},
			// Bad base64-encoded cipher
			{
				`${rsadecrypt(base64encode("bad cipher"), var.private_key)}`,
				nil,
				true,
			},
			// Empty cipher
			{
				`${rsadecrypt("", var.private_key)}`,
				nil,
				true,
			},
			// Too many arguments
			{
				`${rsadecrypt("", "", "")}`,
				nil,
				true,
			},
			// One argument
			{
				`${rsadecrypt("")}`,
				nil,
				true,
			},
			// No arguments
			{
				`${rsadecrypt()}`,
				nil,
				true,
			},
		},
	})
}

View File

@ -1,335 +0,0 @@
package module
import (
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"net/url"
"os"
"regexp"
"sort"
"strings"
"testing"
version "github.com/hashicorp/go-version"
"github.com/hashicorp/terraform/registry/regsrc"
"github.com/hashicorp/terraform/registry/response"
)
// Map of module names and location of test modules.
// Only one version for now, as we only lookup latest from the registry.
type testMod struct {
location string
version string
}
const (
testCredentials = "test-auth-token"
)
// All the locationes from the mockRegistry start with a file:// scheme. If
// the the location string here doesn't have a scheme, the mockRegistry will
// find the absolute path and return a complete URL.
var testMods = map[string][]testMod{
"registry/foo/bar": {{
location: "file:///download/registry/foo/bar/0.2.3//*?archive=tar.gz",
version: "0.2.3",
}},
"registry/foo/baz": {{
location: "file:///download/registry/foo/baz/1.10.0//*?archive=tar.gz",
version: "1.10.0",
}},
"registry/local/sub": {{
location: "test-fixtures/registry-tar-subdir/foo.tgz//*?archive=tar.gz",
version: "0.1.2",
}},
"exists-in-registry/identifier/provider": {{
location: "file:///registry/exists",
version: "0.2.0",
}},
"relative/foo/bar": {{ // There is an exception for the "relative/" prefix in the test registry server
location: "/relative-path",
version: "0.2.0",
}},
"test-versions/name/provider": {
{version: "2.2.0"},
{version: "2.1.1"},
{version: "1.2.2"},
{version: "1.2.1"},
},
"private/name/provider": {
{version: "1.0.0"},
},
}
func latestVersion(versions []string) string {
var col version.Collection
for _, v := range versions {
ver, err := version.NewVersion(v)
if err != nil {
panic(err)
}
col = append(col, ver)
}
sort.Sort(col)
return col[len(col)-1].String()
}
func mockRegHandler() http.Handler {
mux := http.NewServeMux()
download := func(w http.ResponseWriter, r *http.Request) {
p := strings.TrimLeft(r.URL.Path, "/")
// handle download request
re := regexp.MustCompile(`^([-a-z]+/\w+/\w+).*/download$`)
// download lookup
matches := re.FindStringSubmatch(p)
if len(matches) != 2 {
w.WriteHeader(http.StatusBadRequest)
return
}
// check for auth
if strings.Contains(matches[0], "private/") {
if !strings.Contains(r.Header.Get("Authorization"), testCredentials) {
http.Error(w, "", http.StatusForbidden)
}
}
versions, ok := testMods[matches[1]]
if !ok {
http.NotFound(w, r)
return
}
mod := versions[0]
location := mod.location
if !strings.HasPrefix(matches[0], "relative/") && !strings.HasPrefix(location, "file:///") {
// we can't use filepath.Abs because it will clean `//`
wd, _ := os.Getwd()
location = fmt.Sprintf("file://%s/%s", wd, location)
}
w.Header().Set("X-Terraform-Get", location)
w.WriteHeader(http.StatusNoContent)
// no body
return
}
versions := func(w http.ResponseWriter, r *http.Request) {
p := strings.TrimLeft(r.URL.Path, "/")
re := regexp.MustCompile(`^([-a-z]+/\w+/\w+)/versions$`)
matches := re.FindStringSubmatch(p)
if len(matches) != 2 {
w.WriteHeader(http.StatusBadRequest)
return
}
// check for auth
if strings.Contains(matches[1], "private/") {
if !strings.Contains(r.Header.Get("Authorization"), testCredentials) {
http.Error(w, "", http.StatusForbidden)
}
}
name := matches[1]
versions, ok := testMods[name]
if !ok {
http.NotFound(w, r)
return
}
// only adding the single requested module for now
// this is the minimal that any regisry is epected to support
mpvs := &response.ModuleProviderVersions{
Source: name,
}
for _, v := range versions {
mv := &response.ModuleVersion{
Version: v.version,
}
mpvs.Versions = append(mpvs.Versions, mv)
}
resp := response.ModuleVersions{
Modules: []*response.ModuleProviderVersions{mpvs},
}
js, err := json.Marshal(resp)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.Write(js)
}
mux.Handle("/v1/modules/",
http.StripPrefix("/v1/modules/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if strings.HasSuffix(r.URL.Path, "/download") {
download(w, r)
return
}
if strings.HasSuffix(r.URL.Path, "/versions") {
versions(w, r)
return
}
http.NotFound(w, r)
})),
)
mux.HandleFunc("/.well-known/terraform.json", func(w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json")
io.WriteString(w, `{"modules.v1":"http://localhost/v1/modules/"}`)
})
return mux
}
// Just enough like a registry to exercise our code.
// Returns the location of the latest version
func mockRegistry() *httptest.Server {
server := httptest.NewServer(mockRegHandler())
return server
}
// GitHub archives always contain the module source in a single subdirectory,
// so the registry will return a path with with a `//*` suffix. We need to make
// sure this doesn't intefere with our internal handling of `//` subdir.
func TestRegistryGitHubArchive(t *testing.T) {
server := mockRegistry()
defer server.Close()
disco := testDisco(server)
storage := testStorage(t, disco)
tree := NewTree("", testConfig(t, "registry-tar-subdir"))
storage.Mode = GetModeGet
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
if !tree.Loaded() {
t.Fatal("should be loaded")
}
storage.Mode = GetModeNone
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
// stop the registry server, and make sure that we don't need to call out again
server.Close()
tree = NewTree("", testConfig(t, "registry-tar-subdir"))
storage.Mode = GetModeGet
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
if !tree.Loaded() {
t.Fatal("should be loaded")
}
actual := strings.TrimSpace(tree.String())
expected := strings.TrimSpace(treeLoadSubdirStr)
if actual != expected {
t.Fatalf("got: \n\n%s\nexpected: \n\n%s", actual, expected)
}
}
// Test that the //subdir notation can be used with registry modules
func TestRegisryModuleSubdir(t *testing.T) {
server := mockRegistry()
defer server.Close()
disco := testDisco(server)
storage := testStorage(t, disco)
tree := NewTree("", testConfig(t, "registry-subdir"))
storage.Mode = GetModeGet
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
if !tree.Loaded() {
t.Fatal("should be loaded")
}
storage.Mode = GetModeNone
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(tree.String())
expected := strings.TrimSpace(treeLoadRegistrySubdirStr)
if actual != expected {
t.Fatalf("got: \n\n%s\nexpected: \n\n%s", actual, expected)
}
}
func TestAccRegistryDiscover(t *testing.T) {
if os.Getenv("TF_ACC") == "" {
t.Skip("skipping ACC test")
}
// simply check that we get a valid github URL for this from the registry
module, err := regsrc.ParseModuleSource("hashicorp/consul/aws")
if err != nil {
t.Fatal(err)
}
s := NewStorage("/tmp", nil, nil)
loc, err := s.lookupModuleLocation(module, "")
if err != nil {
t.Fatal(err)
}
u, err := url.Parse(loc)
if err != nil {
t.Fatal(err)
}
if !strings.HasSuffix(u.Host, "github.com") {
t.Fatalf("expected host 'github.com', got: %q", u.Host)
}
if !strings.Contains(u.String(), "consul") {
t.Fatalf("url doesn't contain 'consul': %s", u.String())
}
}
func TestAccRegistryLoad(t *testing.T) {
if os.Getenv("TF_ACC") == "" {
t.Skip("skipping ACC test")
}
storage := testStorage(t, nil)
tree := NewTree("", testConfig(t, "registry-load"))
storage.Mode = GetModeGet
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
if !tree.Loaded() {
t.Fatal("should be loaded")
}
storage.Mode = GetModeNone
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
// TODO expand this further by fetching some metadata from the registry
actual := strings.TrimSpace(tree.String())
if !strings.Contains(actual, "(path: vault)") {
t.Fatal("missing vault module, got:\n", actual)
}
}

View File

@ -1,4 +1,4 @@
// +build linux darwin openbsd netbsd solaris
// +build linux darwin openbsd netbsd solaris dragonfly
package module

View File

@ -1,16 +1,13 @@
package module
import (
"fmt"
"io/ioutil"
"log"
"net/http/httptest"
"os"
"path/filepath"
"testing"
"github.com/hashicorp/terraform/config"
"github.com/hashicorp/terraform/svchost"
"github.com/hashicorp/terraform/svchost/disco"
)
@ -49,18 +46,3 @@ func testStorage(t *testing.T, d *disco.Disco) *Storage {
t.Helper()
return NewStorage(tempDir(t), d, nil)
}
// test discovery maps registry.terraform.io, localhost, localhost.localdomain,
// and example.com to the test server.
func testDisco(s *httptest.Server) *disco.Disco {
services := map[string]interface{}{
"modules.v1": fmt.Sprintf("%s/v1/modules/", s.URL),
}
d := disco.NewDisco()
d.ForceHostServices(svchost.Hostname("registry.terraform.io"), services)
d.ForceHostServices(svchost.Hostname("localhost"), services)
d.ForceHostServices(svchost.Hostname("localhost.localdomain"), services)
d.ForceHostServices(svchost.Hostname("example.com"), services)
return d
}

View File

@ -9,6 +9,7 @@ import (
"path/filepath"
getter "github.com/hashicorp/go-getter"
"github.com/hashicorp/terraform/registry"
"github.com/hashicorp/terraform/registry/regsrc"
"github.com/hashicorp/terraform/svchost/auth"
"github.com/hashicorp/terraform/svchost/disco"
@ -73,20 +74,17 @@ type Storage struct {
Ui cli.Ui
// Mode is the GetMode that will be used for various operations.
Mode GetMode
registry *registry.Client
}
func NewStorage(dir string, services *disco.Disco, creds auth.CredentialsSource) *Storage {
s := &Storage{
StorageDir: dir,
Services: services,
Creds: creds,
}
regClient := registry.NewClient(services, creds, nil)
// make sure this isn't nil
if s.Services == nil {
s.Services = disco.NewDisco()
return &Storage{
StorageDir: dir,
registry: regClient,
}
return s
}
// loadManifest returns the moduleManifest file from the parent directory.
@ -297,17 +295,17 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e
}
rec.registry = true
log.Printf("[TRACE] %q is a registry module", mod.Module())
log.Printf("[TRACE] %q is a registry module", mod.Display())
versions, err := s.moduleVersions(mod.String())
if err != nil {
log.Printf("[ERROR] error looking up versions for %q: %s", mod.Module(), err)
log.Printf("[ERROR] error looking up versions for %q: %s", mod.Display(), err)
return rec, err
}
match, err := newestRecord(versions, constraint)
if err != nil {
log.Printf("[INFO] no matching version for %q<%s>, %s", mod.Module(), constraint, err)
log.Printf("[INFO] no matching version for %q<%s>, %s", mod.Display(), constraint, err)
}
log.Printf("[DEBUG] matched %q version %s for %s", mod, match.Version, constraint)
@ -318,13 +316,13 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e
// we need to lookup available versions
// Only on Get if it's not found, on unconditionally on Update
if (s.Mode == GetModeGet && !found) || (s.Mode == GetModeUpdate) {
resp, err := s.lookupModuleVersions(mod)
resp, err := s.registry.Versions(mod)
if err != nil {
return rec, err
}
if len(resp.Modules) == 0 {
return rec, fmt.Errorf("module %q not found in registry", mod.Module())
return rec, fmt.Errorf("module %q not found in registry", mod.Display())
}
match, err := newestVersion(resp.Modules[0].Versions, constraint)
@ -333,12 +331,12 @@ func (s Storage) findRegistryModule(mSource, constraint string) (moduleRecord, e
}
if match == nil {
return rec, fmt.Errorf("no versions for %q found matching %q", mod.Module(), constraint)
return rec, fmt.Errorf("no versions for %q found matching %q", mod.Display(), constraint)
}
rec.Version = match.Version
rec.url, err = s.lookupModuleLocation(mod, rec.Version)
rec.url, err = s.registry.Location(mod, rec.Version)
if err != nil {
return rec, err
}

View File

@ -2,15 +2,20 @@ package module
import (
"io/ioutil"
"net/url"
"os"
"path/filepath"
"strings"
"testing"
"github.com/hashicorp/terraform/registry/regsrc"
"github.com/hashicorp/terraform/registry/test"
)
func TestGetModule(t *testing.T) {
server := mockRegistry()
server := test.Registry()
defer server.Close()
disco := testDisco(server)
disco := test.Disco(server)
td, err := ioutil.TempDir("", "tf")
if err != nil {
@ -19,7 +24,7 @@ func TestGetModule(t *testing.T) {
defer os.RemoveAll(td)
storage := NewStorage(td, disco, nil)
// this module exists in a test fixture, and is known by the mockRegistry
// this module exists in a test fixture, and is known by the test.Registry
// relative to our cwd.
err = storage.GetModule(filepath.Join(td, "foo"), "registry/local/sub")
if err != nil {
@ -45,5 +50,140 @@ func TestGetModule(t *testing.T) {
if err != nil {
t.Fatal(err)
}
}
// GitHub archives always contain the module source in a single subdirectory,
// so the registry will return a path with with a `//*` suffix. We need to make
// sure this doesn't intefere with our internal handling of `//` subdir.
func TestRegistryGitHubArchive(t *testing.T) {
server := test.Registry()
defer server.Close()
disco := test.Disco(server)
storage := testStorage(t, disco)
tree := NewTree("", testConfig(t, "registry-tar-subdir"))
storage.Mode = GetModeGet
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
if !tree.Loaded() {
t.Fatal("should be loaded")
}
storage.Mode = GetModeNone
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
// stop the registry server, and make sure that we don't need to call out again
server.Close()
tree = NewTree("", testConfig(t, "registry-tar-subdir"))
storage.Mode = GetModeGet
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
if !tree.Loaded() {
t.Fatal("should be loaded")
}
actual := strings.TrimSpace(tree.String())
expected := strings.TrimSpace(treeLoadSubdirStr)
if actual != expected {
t.Fatalf("got: \n\n%s\nexpected: \n\n%s", actual, expected)
}
}
// Test that the //subdir notation can be used with registry modules
func TestRegisryModuleSubdir(t *testing.T) {
server := test.Registry()
defer server.Close()
disco := test.Disco(server)
storage := testStorage(t, disco)
tree := NewTree("", testConfig(t, "registry-subdir"))
storage.Mode = GetModeGet
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
if !tree.Loaded() {
t.Fatal("should be loaded")
}
storage.Mode = GetModeNone
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
actual := strings.TrimSpace(tree.String())
expected := strings.TrimSpace(treeLoadRegistrySubdirStr)
if actual != expected {
t.Fatalf("got: \n\n%s\nexpected: \n\n%s", actual, expected)
}
}
func TestAccRegistryDiscover(t *testing.T) {
if os.Getenv("TF_ACC") == "" {
t.Skip("skipping ACC test")
}
// simply check that we get a valid github URL for this from the registry
module, err := regsrc.ParseModuleSource("hashicorp/consul/aws")
if err != nil {
t.Fatal(err)
}
s := NewStorage("/tmp", nil, nil)
loc, err := s.registry.Location(module, "")
if err != nil {
t.Fatal(err)
}
u, err := url.Parse(loc)
if err != nil {
t.Fatal(err)
}
if !strings.HasSuffix(u.Host, "github.com") {
t.Fatalf("expected host 'github.com', got: %q", u.Host)
}
if !strings.Contains(u.String(), "consul") {
t.Fatalf("url doesn't contain 'consul': %s", u.String())
}
}
func TestAccRegistryLoad(t *testing.T) {
if os.Getenv("TF_ACC") == "" {
t.Skip("skipping ACC test")
}
storage := testStorage(t, nil)
tree := NewTree("", testConfig(t, "registry-load"))
storage.Mode = GetModeGet
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
if !tree.Loaded() {
t.Fatal("should be loaded")
}
storage.Mode = GetModeNone
if err := tree.Load(storage); err != nil {
t.Fatalf("err: %s", err)
}
// TODO expand this further by fetching some metadata from the registry
actual := strings.TrimSpace(tree.String())
if !strings.Contains(actual, "(path: vault)") {
t.Fatal("missing vault module, got:\n", actual)
}
}

View File

@ -2,7 +2,7 @@
package config
import "fmt"
import "strconv"
const _ResourceMode_name = "ManagedResourceModeDataResourceMode"
@ -10,7 +10,7 @@ var _ResourceMode_index = [...]uint8{0, 19, 35}
func (i ResourceMode) String() string {
if i < 0 || i >= ResourceMode(len(_ResourceMode_index)-1) {
return fmt.Sprintf("ResourceMode(%d)", i)
return "ResourceMode(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _ResourceMode_name[_ResourceMode_index[i]:_ResourceMode_index[i+1]]
}

View File

@ -32,7 +32,6 @@ func configDir() (string, error) {
func homeDir() (string, error) {
// First prefer the HOME environmental variable
if home := os.Getenv("HOME"); home != "" {
// FIXME: homeDir gets called from globalPluginDirs during init, before
// the logging is setup. We should move meta initializtion outside of
// init, but in the meantime we just need to silence this output.

View File

@ -2,7 +2,7 @@
package schema
import "fmt"
import "strconv"
const (
_getSource_name_0 = "getSourceStategetSourceConfig"
@ -13,8 +13,6 @@ const (
var (
_getSource_index_0 = [...]uint8{0, 14, 29}
_getSource_index_1 = [...]uint8{0, 13}
_getSource_index_2 = [...]uint8{0, 12}
_getSource_index_3 = [...]uint8{0, 18, 32}
)
@ -31,6 +29,6 @@ func (i getSource) String() string {
i -= 15
return _getSource_name_3[_getSource_index_3[i]:_getSource_index_3[i+1]]
default:
return fmt.Sprintf("getSource(%d)", i)
return "getSource(" + strconv.FormatInt(int64(i), 10) + ")"
}
}

View File

@ -282,7 +282,6 @@ func (r *Resource) ReadDataApply(
d *terraform.InstanceDiff,
meta interface{},
) (*terraform.InstanceState, error) {
// Data sources are always built completely from scratch
// on each read, so the source state is always nil.
data, err := schemaMap(r.Schema).Data(nil, d)

View File

@ -445,7 +445,7 @@ func (d *ResourceData) init() {
}
func (d *ResourceData) diffChange(
k string) (interface{}, interface{}, bool, bool) {
k string) (interface{}, interface{}, bool, bool, bool) {
// Get the change between the state and the config.
o, n := d.getChange(k, getSourceState, getSourceConfig|getSourceExact)
if !o.Exists {
@ -456,7 +456,7 @@ func (d *ResourceData) diffChange(
}
// Return the old, new, and whether there is a change
return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed
return o.Value, n.Value, !reflect.DeepEqual(o.Value, n.Value), n.Computed, false
}
func (d *ResourceData) getChange(

View File

@ -236,8 +236,8 @@ func (d *ResourceDiff) clear(key string) error {
// diffChange helps to implement resourceDiffer and derives its change values
// from ResourceDiff's own change data, in addition to existing diff, config, and state.
func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool) {
old, new := d.getChange(key)
func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, bool, bool) {
old, new, customized := d.getChange(key)
if !old.Exists {
old.Value = nil
@ -246,7 +246,7 @@ func (d *ResourceDiff) diffChange(key string) (interface{}, interface{}, bool, b
new.Value = nil
}
return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed
return old.Value, new.Value, !reflect.DeepEqual(old.Value, new.Value), new.Computed, customized
}
// SetNew is used to set a new diff value for the mentioned key. The value must
@ -327,7 +327,7 @@ func (d *ResourceDiff) Get(key string) interface{} {
// results from the exact levels for the new diff, then from state and diff as
// per normal.
func (d *ResourceDiff) GetChange(key string) (interface{}, interface{}) {
old, new := d.getChange(key)
old, new, _ := d.getChange(key)
return old.Value, new.Value
}
@ -387,18 +387,17 @@ func (d *ResourceDiff) Id() string {
// This implementation differs from ResourceData's in the way that we first get
// results from the exact levels for the new diff, then from state and diff as
// per normal.
func (d *ResourceDiff) getChange(key string) (getResult, getResult) {
func (d *ResourceDiff) getChange(key string) (getResult, getResult, bool) {
old := d.get(strings.Split(key, "."), "state")
var new getResult
for p := range d.updatedKeys {
if childAddrOf(key, p) {
new = d.getExact(strings.Split(key, "."), "newDiff")
goto done
return old, new, true
}
}
new = d.get(strings.Split(key, "."), "newDiff")
done:
return old, new
return old, new, false
}
// get performs the appropriate multi-level reader logic for ResourceDiff,

View File

@ -296,8 +296,7 @@ func (s *Schema) ZeroValue() interface{} {
}
}
func (s *Schema) finalizeDiff(
d *terraform.ResourceAttrDiff) *terraform.ResourceAttrDiff {
func (s *Schema) finalizeDiff(d *terraform.ResourceAttrDiff, customized bool) *terraform.ResourceAttrDiff {
if d == nil {
return d
}
@ -337,14 +336,21 @@ func (s *Schema) finalizeDiff(
return d
}
if s.Computed && !d.NewComputed {
if s.Computed {
// FIXME: This is where the customized bool from getChange finally
// comes into play. It allows the previously incorrect behavior
// of an empty string being used as "unset" when the value is
// computed. This should be removed once we can properly
// represent an unset/nil value from the configuration.
if !customized {
if d.Old != "" && d.New == "" {
// This is a computed value with an old value set already,
// just let it go.
return nil
}
}
if d.New == "" {
if d.New == "" && !d.NewComputed {
// Computed attribute without a new value set
d.NewComputed = true
}
@ -744,7 +750,7 @@ func isValidFieldName(name string) bool {
// This helps facilitate diff logic for both ResourceData and ResoureDiff with
// minimal divergence in code.
type resourceDiffer interface {
diffChange(string) (interface{}, interface{}, bool, bool)
diffChange(string) (interface{}, interface{}, bool, bool, bool)
Get(string) interface{}
GetChange(string) (interface{}, interface{})
GetOk(string) (interface{}, bool)
@ -797,7 +803,7 @@ func (m schemaMap) diffList(
diff *terraform.InstanceDiff,
d resourceDiffer,
all bool) error {
o, n, _, computedList := d.diffChange(k)
o, n, _, computedList, customized := d.diffChange(k)
if computedList {
n = nil
}
@ -864,10 +870,13 @@ func (m schemaMap) diffList(
oldStr = ""
}
diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
diff.Attributes[k+".#"] = countSchema.finalizeDiff(
&terraform.ResourceAttrDiff{
Old: oldStr,
New: newStr,
})
},
customized,
)
}
// Figure out the maximum
@ -920,7 +929,7 @@ func (m schemaMap) diffMap(
// First get all the values from the state
var stateMap, configMap map[string]string
o, n, _, nComputed := d.diffChange(k)
o, n, _, nComputed, customized := d.diffChange(k)
if err := mapstructure.WeakDecode(o, &stateMap); err != nil {
return fmt.Errorf("%s: %s", k, err)
}
@ -972,6 +981,7 @@ func (m schemaMap) diffMap(
Old: oldStr,
New: newStr,
},
customized,
)
}
@ -989,16 +999,22 @@ func (m schemaMap) diffMap(
continue
}
diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
diff.Attributes[prefix+k] = schema.finalizeDiff(
&terraform.ResourceAttrDiff{
Old: old,
New: v,
})
},
customized,
)
}
for k, v := range stateMap {
diff.Attributes[prefix+k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
diff.Attributes[prefix+k] = schema.finalizeDiff(
&terraform.ResourceAttrDiff{
Old: v,
NewRemoved: true,
})
},
customized,
)
}
return nil
@ -1011,7 +1027,7 @@ func (m schemaMap) diffSet(
d resourceDiffer,
all bool) error {
o, n, _, computedSet := d.diffChange(k)
o, n, _, computedSet, customized := d.diffChange(k)
if computedSet {
n = nil
}
@ -1070,20 +1086,26 @@ func (m schemaMap) diffSet(
countStr = ""
}
diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
diff.Attributes[k+".#"] = countSchema.finalizeDiff(
&terraform.ResourceAttrDiff{
Old: countStr,
NewComputed: true,
})
},
customized,
)
return nil
}
// If the counts are not the same, then record that diff
changed := oldLen != newLen
if changed || all {
diff.Attributes[k+".#"] = countSchema.finalizeDiff(&terraform.ResourceAttrDiff{
diff.Attributes[k+".#"] = countSchema.finalizeDiff(
&terraform.ResourceAttrDiff{
Old: oldStr,
New: newStr,
})
},
customized,
)
}
// Build the list of codes that will make up our set. This is the
@ -1133,7 +1155,7 @@ func (m schemaMap) diffString(
all bool) error {
var originalN interface{}
var os, ns string
o, n, _, computed := d.diffChange(k)
o, n, _, computed, customized := d.diffChange(k)
if schema.StateFunc != nil && n != nil {
originalN = n
n = schema.StateFunc(n)
@ -1170,13 +1192,16 @@ func (m schemaMap) diffString(
return nil
}
diff.Attributes[k] = schema.finalizeDiff(&terraform.ResourceAttrDiff{
diff.Attributes[k] = schema.finalizeDiff(
&terraform.ResourceAttrDiff{
Old: os,
New: ns,
NewExtra: originalN,
NewRemoved: removed,
NewComputed: computed,
})
},
customized,
)
return nil
}
@ -1503,7 +1528,6 @@ func (m schemaMap) validatePrimitive(
raw interface{},
schema *Schema,
c *terraform.ResourceConfig) ([]string, []error) {
// Catch if the user gave a complex type where a primitive was
// expected, so we can return a friendly error message that
// doesn't contain Go type system terminology.

View File

@ -3110,6 +3110,38 @@ func TestSchemaMap_Diff(t *testing.T) {
Err: true,
},
// A lot of resources currently depended on using the empty string as a
// nil/unset value.
// FIXME: We want this to eventually produce a diff, since there
// technically is a new value in the config.
{
Name: "optional, computed, empty string",
Schema: map[string]*Schema{
"attr": &Schema{
Type: TypeString,
Optional: true,
Computed: true,
},
},
State: &terraform.InstanceState{
Attributes: map[string]string{
"attr": "bar",
},
},
// this does necessarily depend on an interpolated value, but this
// is often how it comes about in a configuration, otherwise the
// value would be unset.
Config: map[string]interface{}{
"attr": "${var.foo}",
},
ConfigVariables: map[string]ast.Variable{
"var.foo": interfaceToVariableSwallowError(""),
},
},
}
for i, tc := range cases {

View File

@ -2,7 +2,7 @@
package schema
import "fmt"
import "strconv"
const _ValueType_name = "TypeInvalidTypeBoolTypeIntTypeFloatTypeStringTypeListTypeMapTypeSettypeObject"
@ -10,7 +10,7 @@ var _ValueType_index = [...]uint8{0, 11, 19, 26, 35, 45, 53, 60, 67, 77}
func (i ValueType) String() string {
if i < 0 || i >= ValueType(len(_ValueType_index)-1) {
return fmt.Sprintf("ValueType(%d)", i)
return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]]
}

View File

@ -1,4 +1,4 @@
package module
package registry
import (
"encoding/json"
@ -12,75 +12,75 @@ import (
"time"
cleanhttp "github.com/hashicorp/go-cleanhttp"
"github.com/hashicorp/terraform/registry/regsrc"
"github.com/hashicorp/terraform/registry/response"
"github.com/hashicorp/terraform/svchost"
"github.com/hashicorp/terraform/svchost/auth"
"github.com/hashicorp/terraform/svchost/disco"
"github.com/hashicorp/terraform/version"
)
const (
defaultRegistry = "registry.terraform.io"
registryServiceID = "registry.v1"
xTerraformGet = "X-Terraform-Get"
xTerraformVersion = "X-Terraform-Version"
requestTimeout = 10 * time.Second
serviceID = "modules.v1"
)
var (
httpClient *http.Client
tfVersion = version.String()
)
var tfVersion = version.String()
func init() {
httpClient = cleanhttp.DefaultPooledClient()
httpClient.Timeout = requestTimeout
// Client provides methods to query Terraform Registries.
type Client struct {
// this is the client to be used for all requests.
client *http.Client
// services is a required *disco.Disco, which may have services and
// credentials pre-loaded.
services *disco.Disco
// Creds optionally provides credentials for communicating with service
// providers.
creds auth.CredentialsSource
}
type errModuleNotFound string
func (e errModuleNotFound) Error() string {
return `module "` + string(e) + `" not found`
}
func (s *Storage) discoverRegURL(host svchost.Hostname) *url.URL {
regURL := s.Services.DiscoverServiceURL(host, serviceID)
if regURL == nil {
return nil
func NewClient(services *disco.Disco, creds auth.CredentialsSource, client *http.Client) *Client {
if services == nil {
services = disco.NewDisco()
}
if !strings.HasSuffix(regURL.Path, "/") {
regURL.Path += "/"
services.SetCredentialsSource(creds)
if client == nil {
client = cleanhttp.DefaultPooledClient()
client.Timeout = requestTimeout
}
return regURL
}
services.Transport = client.Transport.(*http.Transport)
func (s *Storage) addRequestCreds(host svchost.Hostname, req *http.Request) {
if s.Creds == nil {
return
}
creds, err := s.Creds.ForHost(host)
if err != nil {
log.Printf("[WARNING] Failed to get credentials for %s: %s (ignoring)", host, err)
return
}
if creds != nil {
creds.PrepareRequest(req)
return &Client{
client: client,
services: services,
creds: creds,
}
}
// Lookup module versions in the registry.
func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleVersions, error) {
// Discover qeuries the host, and returns the url for the registry.
func (c *Client) Discover(host svchost.Hostname) *url.URL {
service := c.services.DiscoverServiceURL(host, serviceID)
if !strings.HasSuffix(service.Path, "/") {
service.Path += "/"
}
return service
}
// Versions queries the registry for a module, and returns the available versions.
func (c *Client) Versions(module *regsrc.Module) (*response.ModuleVersions, error) {
host, err := module.SvcHost()
if err != nil {
return nil, err
}
service := s.discoverRegURL(host)
service := c.Discover(host)
if service == nil {
return nil, fmt.Errorf("host %s does not provide Terraform modules", host)
}
@ -99,10 +99,10 @@ func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleV
return nil, err
}
s.addRequestCreds(host, req)
c.addRequestCreds(host, req)
req.Header.Set(xTerraformVersion, tfVersion)
resp, err := httpClient.Do(req)
resp, err := c.client.Do(req)
if err != nil {
return nil, err
}
@ -112,7 +112,7 @@ func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleV
case http.StatusOK:
// OK
case http.StatusNotFound:
return nil, errModuleNotFound(module.String())
return nil, fmt.Errorf("module %q not found", module.String())
default:
return nil, fmt.Errorf("error looking up module versions: %s", resp.Status)
}
@ -133,14 +133,31 @@ func (s *Storage) lookupModuleVersions(module *regsrc.Module) (*response.ModuleV
return &versions, nil
}
// lookup the location of a specific module version in the registry
func (s *Storage) lookupModuleLocation(module *regsrc.Module, version string) (string, error) {
func (c *Client) addRequestCreds(host svchost.Hostname, req *http.Request) {
if c.creds == nil {
return
}
creds, err := c.creds.ForHost(host)
if err != nil {
log.Printf("[WARNING] Failed to get credentials for %s: %s (ignoring)", host, err)
return
}
if creds != nil {
creds.PrepareRequest(req)
}
}
// Location find the download location for a specific version module.
// This returns a string, because the final location may contain special go-getter syntax.
func (c *Client) Location(module *regsrc.Module, version string) (string, error) {
host, err := module.SvcHost()
if err != nil {
return "", err
}
service := s.discoverRegURL(host)
service := c.Discover(host)
if service == nil {
return "", fmt.Errorf("host %s does not provide Terraform modules", host.ForDisplay())
}
@ -163,10 +180,10 @@ func (s *Storage) lookupModuleLocation(module *regsrc.Module, version string) (s
return "", err
}
s.addRequestCreds(host, req)
c.addRequestCreds(host, req)
req.Header.Set(xTerraformVersion, tfVersion)
resp, err := httpClient.Do(req)
resp, err := c.client.Do(req)
if err != nil {
return "", err
}

View File

@ -1,4 +1,4 @@
package module
package registry
import (
"os"
@ -7,16 +7,15 @@ import (
version "github.com/hashicorp/go-version"
"github.com/hashicorp/terraform/registry/regsrc"
"github.com/hashicorp/terraform/svchost"
"github.com/hashicorp/terraform/svchost/auth"
"github.com/hashicorp/terraform/registry/test"
"github.com/hashicorp/terraform/svchost/disco"
)
func TestLookupModuleVersions(t *testing.T) {
server := mockRegistry()
server := test.Registry()
defer server.Close()
regDisco := testDisco(server)
client := NewClient(test.Disco(server), nil, nil)
// test with and without a hostname
for _, src := range []string{
@ -28,8 +27,7 @@ func TestLookupModuleVersions(t *testing.T) {
t.Fatal(err)
}
s := &Storage{Services: regDisco}
resp, err := s.lookupModuleVersions(modsrc)
resp, err := client.Versions(modsrc)
if err != nil {
t.Fatal(err)
}
@ -58,11 +56,10 @@ func TestLookupModuleVersions(t *testing.T) {
}
func TestRegistryAuth(t *testing.T) {
server := mockRegistry()
server := test.Registry()
defer server.Close()
regDisco := testDisco(server)
storage := testStorage(t, regDisco)
client := NewClient(test.Disco(server), nil, nil)
src := "private/name/provider"
mod, err := regsrc.ParseModuleSource(src)
@ -71,36 +68,32 @@ func TestRegistryAuth(t *testing.T) {
}
// both should fail without auth
_, err = storage.lookupModuleVersions(mod)
_, err = client.Versions(mod)
if err == nil {
t.Fatal("expected error")
}
_, err = storage.lookupModuleLocation(mod, "1.0.0")
_, err = client.Location(mod, "1.0.0")
if err == nil {
t.Fatal("expected error")
}
storage.Creds = auth.StaticCredentialsSource(map[svchost.Hostname]map[string]interface{}{
svchost.Hostname(defaultRegistry): {"token": testCredentials},
})
client = NewClient(test.Disco(server), test.Credentials, nil)
_, err = storage.lookupModuleVersions(mod)
_, err = client.Versions(mod)
if err != nil {
t.Fatal(err)
}
_, err = storage.lookupModuleLocation(mod, "1.0.0")
_, err = client.Location(mod, "1.0.0")
if err != nil {
t.Fatal(err)
}
}
func TestLookupModuleLocationRelative(t *testing.T) {
server := mockRegistry()
server := test.Registry()
defer server.Close()
regDisco := testDisco(server)
storage := testStorage(t, regDisco)
client := NewClient(test.Disco(server), nil, nil)
src := "relative/foo/bar"
mod, err := regsrc.ParseModuleSource(src)
@ -108,7 +101,7 @@ func TestLookupModuleLocationRelative(t *testing.T) {
t.Fatal(err)
}
got, err := storage.lookupModuleLocation(mod, "0.2.0")
got, err := client.Location(mod, "0.2.0")
if err != nil {
t.Fatal(err)
}
@ -117,7 +110,6 @@ func TestLookupModuleLocationRelative(t *testing.T) {
if got != want {
t.Errorf("wrong location %s; want %s", got, want)
}
}
func TestAccLookupModuleVersions(t *testing.T) {
@ -129,17 +121,15 @@ func TestAccLookupModuleVersions(t *testing.T) {
// test with and without a hostname
for _, src := range []string{
"terraform-aws-modules/vpc/aws",
defaultRegistry + "/terraform-aws-modules/vpc/aws",
regsrc.PublicRegistryHost.String() + "/terraform-aws-modules/vpc/aws",
} {
modsrc, err := regsrc.ParseModuleSource(src)
if err != nil {
t.Fatal(err)
}
s := &Storage{
Services: regDisco,
}
resp, err := s.lookupModuleVersions(modsrc)
s := NewClient(regDisco, nil, nil)
resp, err := s.Versions(modsrc)
if err != nil {
t.Fatal(err)
}
@ -169,11 +159,10 @@ func TestAccLookupModuleVersions(t *testing.T) {
// the error should reference the config source exatly, not the discovered path.
func TestLookupLookupModuleError(t *testing.T) {
server := mockRegistry()
server := test.Registry()
defer server.Close()
regDisco := testDisco(server)
storage := testStorage(t, regDisco)
client := NewClient(test.Disco(server), nil, nil)
// this should not be found in teh registry
src := "bad/local/path"
@ -182,7 +171,7 @@ func TestLookupLookupModuleError(t *testing.T) {
t.Fatal(err)
}
_, err = storage.lookupModuleLocation(mod, "0.2.0")
_, err = client.Location(mod, "0.2.0")
if err == nil {
t.Fatal("expected error")
}

View File

@ -145,13 +145,8 @@ func NewContext(opts *ContextOpts) (*Context, error) {
// If our state is from the future, then error. Callers can avoid
// this error by explicitly setting `StateFutureAllowed`.
if !opts.StateFutureAllowed && state.FromFutureTerraform() {
return nil, fmt.Errorf(
"Terraform doesn't allow running any operations against a state\n"+
"that was written by a future Terraform version. The state is\n"+
"reporting it is written by Terraform '%s'.\n\n"+
"Please run at least that version of Terraform to continue.",
state.TFVersion)
if err := CheckStateVersion(state); err != nil && !opts.StateFutureAllowed {
return nil, err
}
// Explicitly reset our state version to our current version so that

View File

@ -3883,7 +3883,6 @@ func TestContext2Apply_outputDependsOn(t *testing.T) {
info *InstanceInfo,
is *InstanceState,
id *InstanceDiff) (*InstanceState, error) {
// Sleep to allow parallel execution
time.Sleep(50 * time.Millisecond)

View File

@ -2401,6 +2401,32 @@ func TestContext2Plan_hook(t *testing.T) {
}
}
func TestContext2Plan_closeProvider(t *testing.T) {
// this fixture only has an aliased provider located in the module, to make
// sure that the provier name contains a path more complex than
// "provider.aws".
m := testModule(t, "plan-close-module-provider")
p := testProvider("aws")
p.DiffFn = testDiffFn
ctx := testContext2(t, &ContextOpts{
Module: m,
ProviderResolver: ResourceProviderResolverFixed(
map[string]ResourceProviderFactory{
"aws": testProviderFuncFixed(p),
},
),
})
_, err := ctx.Plan()
if err != nil {
t.Fatalf("err: %s", err)
}
if !p.CloseCalled {
t.Fatal("provider not closed")
}
}
func TestContext2Plan_orphan(t *testing.T) {
m := testModule(t, "plan-orphan")
p := testProvider("aws")

View File

@ -396,6 +396,11 @@ type ResourceAttrDiff struct {
Type DiffAttrType
}
// Modified returns the inequality of Old and New for this attr
func (d *ResourceAttrDiff) Modified() bool {
return d.Old != d.New
}
// Empty returns true if the diff for this attr is neutral
func (d *ResourceAttrDiff) Empty() bool {
return d.Old == d.New && !d.NewComputed && !d.NewRemoved

View File

@ -258,12 +258,14 @@ func (n *EvalDiff) processIgnoreChanges(diff *InstanceDiff) error {
for _, v := range containers {
if v.keepDiff() {
// At least one key has changes, so list all the sibling keys
// to keep in the diff.
// to keep in the diff if any values have changed
for k := range v {
if v[k].Modified() {
keep[k] = true
}
}
}
}
for k, v := range attrs {
if (v.Empty() || v.NewComputed) && !keep[k] {

View File

@ -3,6 +3,8 @@ package terraform
import (
"reflect"
"testing"
"github.com/hashicorp/terraform/config"
)
func TestEvalFilterDiff(t *testing.T) {
@ -76,3 +78,69 @@ func TestEvalFilterDiff(t *testing.T) {
}
}
}
func TestProcessIgnoreChangesOnResourceIgnoredWithRequiresNew(t *testing.T) {
var evalDiff *EvalDiff
var instanceDiff *InstanceDiff
var testDiffs = func(ignoreChanges []string, newAttribute string) (*EvalDiff, *InstanceDiff) {
return &EvalDiff{
Resource: &config.Resource{
Lifecycle: config.ResourceLifecycle{
IgnoreChanges: ignoreChanges,
},
},
},
&InstanceDiff{
Destroy: true,
Attributes: map[string]*ResourceAttrDiff{
"resource.changed": {
RequiresNew: true,
Type: DiffAttrInput,
Old: "old",
New: "new",
},
"resource.unchanged": {
Old: "unchanged",
New: newAttribute,
},
},
}
}
evalDiff, instanceDiff = testDiffs([]string{"resource.changed"}, "unchanged")
err := evalDiff.processIgnoreChanges(instanceDiff)
if err != nil {
t.Fatalf("err: %s", err)
}
if len(instanceDiff.Attributes) > 0 {
t.Fatalf("Expected all resources to be ignored, found %d", len(instanceDiff.Attributes))
}
evalDiff, instanceDiff = testDiffs([]string{}, "unchanged")
err = evalDiff.processIgnoreChanges(instanceDiff)
if err != nil {
t.Fatalf("err: %s", err)
}
if len(instanceDiff.Attributes) != 2 {
t.Fatalf("Expected 2 resources to be found, found %d", len(instanceDiff.Attributes))
}
evalDiff, instanceDiff = testDiffs([]string{"resource.changed"}, "changed")
err = evalDiff.processIgnoreChanges(instanceDiff)
if err != nil {
t.Fatalf("err: %s", err)
}
if len(instanceDiff.Attributes) != 1 {
t.Fatalf("Expected 1 resource to be found, found %d", len(instanceDiff.Attributes))
}
evalDiff, instanceDiff = testDiffs([]string{}, "changed")
err = evalDiff.processIgnoreChanges(instanceDiff)
if err != nil {
t.Fatalf("err: %s", err)
}
if len(instanceDiff.Attributes) != 2 {
t.Fatalf("Expected 2 resource to be found, found %d", len(instanceDiff.Attributes))
}
}

View File

@ -90,7 +90,8 @@ func TestEvalInitProvider(t *testing.T) {
}
func TestEvalCloseProvider(t *testing.T) {
n := &EvalCloseProvider{Name: "foo"}
providerName := ResolveProviderName("foo", nil)
n := &EvalCloseProvider{Name: providerName}
provider := &MockResourceProvider{}
ctx := &MockEvalContext{CloseProviderProvider: provider}
if _, err := n.Eval(ctx); err != nil {
@ -100,7 +101,7 @@ func TestEvalCloseProvider(t *testing.T) {
if !ctx.CloseProviderCalled {
t.Fatal("should be called")
}
if ctx.CloseProviderName != "foo" {
if ctx.CloseProviderName != providerName {
t.Fatalf("bad: %#v", ctx.CloseProviderName)
}
}

View File

@ -150,6 +150,7 @@ func (n *EvalValidateProvisioner) validateConnConfig(connConfig *ResourceConfig)
BastionUser interface{} `mapstructure:"bastion_user"`
BastionPassword interface{} `mapstructure:"bastion_password"`
BastionPrivateKey interface{} `mapstructure:"bastion_private_key"`
AgentIdentity interface{} `mapstructure:"agent_identity"`
// For type=winrm only (enforced in winrm communicator)
HTTPS interface{} `mapstructure:"https"`

View File

@ -2,7 +2,7 @@
package terraform
import "fmt"
import "strconv"
const _GraphType_name = "GraphTypeInvalidGraphTypeLegacyGraphTypeRefreshGraphTypePlanGraphTypePlanDestroyGraphTypeApplyGraphTypeInputGraphTypeValidate"
@ -10,7 +10,7 @@ var _GraphType_index = [...]uint8{0, 16, 31, 47, 60, 80, 94, 108, 125}
func (i GraphType) String() string {
if i >= GraphType(len(_GraphType_index)-1) {
return fmt.Sprintf("GraphType(%d)", i)
return "GraphType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _GraphType_name[_GraphType_index[i]:_GraphType_index[i+1]]
}

View File

@ -2,7 +2,7 @@
package terraform
import "fmt"
import "strconv"
const _InstanceType_name = "TypeInvalidTypePrimaryTypeTaintedTypeDeposed"
@ -10,7 +10,7 @@ var _InstanceType_index = [...]uint8{0, 11, 22, 33, 44}
func (i InstanceType) String() string {
if i < 0 || i >= InstanceType(len(_InstanceType_index)-1) {
return fmt.Sprintf("InstanceType(%d)", i)
return "InstanceType(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _InstanceType_name[_InstanceType_index[i]:_InstanceType_index[i+1]]
}

View File

@ -142,7 +142,6 @@ func (i *Interpolater) valueModuleVar(
n string,
v *config.ModuleVariable,
result map[string]ast.Variable) error {
// Build the path to the child module we want
path := make([]string, len(scope.Path), len(scope.Path)+1)
copy(path, scope.Path)
@ -319,7 +318,6 @@ func (i *Interpolater) valueTerraformVar(
n string,
v *config.TerraformVariable,
result map[string]ast.Variable) error {
// "env" is supported for backward compatibility, but it's deprecated and
// so we won't advertise it as being allowed in the error message. It will
// be removed in a future version of Terraform.
@ -701,7 +699,6 @@ func (i *Interpolater) computeResourceMultiVariable(
func (i *Interpolater) interpolateComplexTypeAttribute(
resourceID string,
attributes map[string]string) (ast.Variable, error) {
// We can now distinguish between lists and maps in state by the count field:
// - lists (and by extension, sets) use the traditional .# notation
// - maps use the newer .% notation

View File

@ -17,7 +17,6 @@ import (
// present in the configuration. This is guaranteed not to happen for any
// configuration that has passed a call to Config.Validate().
func ModuleTreeDependencies(root *module.Tree, state *State) *moduledeps.Module {
// First we walk the configuration tree to build the overall structure
// and capture the explicit/implicit/inherited provider dependencies.
deps := moduleTreeConfigDependencies(root, nil)

View File

@ -346,7 +346,7 @@ func (c *ResourceConfig) get(
if err != nil {
return nil, false
}
if i >= int64(cv.Len()) {
if int(i) < 0 || int(i) >= cv.Len() {
return nil, false
}
current = cv.Index(int(i)).Interface()

View File

@ -203,6 +203,7 @@ func (p *MockResourceProvider) Diff(
p.DiffInfo = info
p.DiffState = state
p.DiffDesired = desired
if p.DiffFn != nil {
return p.DiffFn(info, state, desired)
}

View File

@ -158,6 +158,14 @@ func TestResourceConfigGet(t *testing.T) {
Value: nil,
},
{
Config: map[string]interface{}{
"foo": []interface{}{1, 2, 5},
},
Key: "foo.-1",
Value: nil,
},
// get from map
{
Config: map[string]interface{}{

View File

@ -2174,6 +2174,19 @@ func (s moduleStateSort) Swap(i, j int) {
s[i], s[j] = s[j], s[i]
}
// StateCompatible returns an error if the state is not compatible with the
// current version of terraform.
func CheckStateVersion(state *State) error {
if state == nil {
return nil
}
if state.FromFutureTerraform() {
return fmt.Errorf(stateInvalidTerraformVersionErr, state.TFVersion)
}
return nil
}
const stateValidateErrMultiModule = `
Multiple modules with the same path: %s
@ -2182,3 +2195,11 @@ in your state file that point to the same module. This will cause Terraform
to behave in unexpected and error prone ways and is invalid. Please back up
and modify your state file manually to resolve this.
`
const stateInvalidTerraformVersionErr = `
Terraform doesn't allow running any operations against a state
that was written by a future Terraform version. The state is
reporting it is written by Terraform '%s'
Please run at least that version of Terraform to continue.
`

View File

@ -1675,10 +1675,7 @@ aws_instance.foo:
const testTFPlanDiffIgnoreChangesWithFlatmaps = `
UPDATE: aws_instance.foo
lst.#: "1" => "2"
lst.0: "j" => "j"
lst.1: "" => "k"
set.#: "1" => "1"
set.0.a: "1" => "1"
set.0.b: "" => "2"
type: "" => "aws_instance"
`

View File

@ -138,13 +138,13 @@ func (t *CloseProviderTransformer) Transform(g *Graph) error {
p := v.(GraphNodeProvider)
// get the close provider of this type if we alread created it
closer := cpm[p.ProviderName()]
closer := cpm[p.Name()]
if closer == nil {
// create a closer for this provider type
closer = &graphNodeCloseProvider{ProviderNameValue: p.ProviderName()}
closer = &graphNodeCloseProvider{ProviderNameValue: p.Name()}
g.Add(closer)
cpm[p.ProviderName()] = closer
cpm[p.Name()] = closer
}
// Close node depends on the provider itself
@ -336,7 +336,7 @@ type graphNodeCloseProvider struct {
}
func (n *graphNodeCloseProvider) Name() string {
return fmt.Sprintf("provider.%s (close)", n.ProviderNameValue)
return n.ProviderNameValue + " (close)"
}
// GraphNodeEvalable impl.

View File

@ -2,7 +2,7 @@
package terraform
import "fmt"
import "strconv"
const _walkOperation_name = "walkInvalidwalkInputwalkApplywalkPlanwalkPlanDestroywalkRefreshwalkValidatewalkDestroywalkImport"
@ -10,7 +10,7 @@ var _walkOperation_index = [...]uint8{0, 11, 20, 29, 37, 52, 63, 75, 86, 96}
func (i walkOperation) String() string {
if i >= walkOperation(len(_walkOperation_index)-1) {
return fmt.Sprintf("walkOperation(%d)", i)
return "walkOperation(" + strconv.FormatInt(int64(i), 10) + ")"
}
return _walkOperation_name[_walkOperation_index[i]:_walkOperation_index[i+1]]
}

View File

@ -2,18 +2,13 @@
package tfdiags
import "fmt"
import "strconv"
const (
_Severity_name_0 = "Error"
_Severity_name_1 = "Warning"
)
var (
_Severity_index_0 = [...]uint8{0, 5}
_Severity_index_1 = [...]uint8{0, 7}
)
func (i Severity) String() string {
switch {
case i == 69:
@ -21,6 +16,6 @@ func (i Severity) String() string {
case i == 87:
return _Severity_name_1
default:
return fmt.Sprintf("Severity(%d)", i)
return "Severity(" + strconv.FormatInt(int64(i), 10) + ")"
}
}

View File

@ -11,7 +11,7 @@ import (
)
// The main version number that is being run at the moment.
const Version = "0.11.1"
const Version = "0.11.2"
// A pre-release marker for the version. If this is "" (empty string)
// then it means that it is a final release. Otherwise, this is a pre-release

View File

@ -35,7 +35,7 @@ buildfuzz:
go-fuzz-build github.com/jmespath/go-jmespath/fuzz
fuzz: buildfuzz
go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/testdata
go-fuzz -bin=./jmespath-fuzz.zip -workdir=fuzz/corpus
bench:
go test -bench . -cpuprofile cpu.out

View File

@ -1,42 +1,5 @@
package jmespath
import "strconv"
// JmesPath is the epresentation of a compiled JMES path query. A JmesPath is
// safe for concurrent use by multiple goroutines.
type JMESPath struct {
ast ASTNode
intr *treeInterpreter
}
// Compile parses a JMESPath expression and returns, if successful, a JMESPath
// object that can be used to match against data.
func Compile(expression string) (*JMESPath, error) {
parser := NewParser()
ast, err := parser.Parse(expression)
if err != nil {
return nil, err
}
jmespath := &JMESPath{ast: ast, intr: newInterpreter()}
return jmespath, nil
}
// MustCompile is like Compile but panics if the expression cannot be parsed.
// It simplifies safe initialization of global variables holding compiled
// JMESPaths.
func MustCompile(expression string) *JMESPath {
jmespath, err := Compile(expression)
if err != nil {
panic(`jmespath: Compile(` + strconv.Quote(expression) + `): ` + err.Error())
}
return jmespath
}
// Search evaluates a JMESPath expression against input data and returns the result.
func (jp *JMESPath) Search(data interface{}) (interface{}, error) {
return jp.intr.Execute(jp.ast, data)
}
// Search evaluates a JMESPath expression against input data and returns the result.
func Search(expression string, data interface{}) (interface{}, error) {
intr := newInterpreter()

View File

@ -1,32 +0,0 @@
package jmespath
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestValidPrecompiledExpressionSearches(t *testing.T) {
assert := assert.New(t)
data := make(map[string]interface{})
data["foo"] = "bar"
precompiled, err := Compile("foo")
assert.Nil(err)
result, err := precompiled.Search(data)
assert.Nil(err)
assert.Equal("bar", result)
}
func TestInvalidPrecompileErrors(t *testing.T) {
assert := assert.New(t)
_, err := Compile("not a valid expression")
assert.NotNil(err)
}
func TestInvalidMustCompilePanics(t *testing.T) {
defer func() {
r := recover()
assert.NotNil(t, r)
}()
MustCompile("not a valid expression")
}

View File

@ -5,7 +5,6 @@ import (
"errors"
"fmt"
"math"
"reflect"
"sort"
"strconv"
"strings"
@ -125,197 +124,197 @@ type functionCaller struct {
func newFunctionCaller() *functionCaller {
caller := &functionCaller{}
caller.functionTable = map[string]functionEntry{
"length": {
"length": functionEntry{
name: "length",
arguments: []argSpec{
{types: []jpType{jpString, jpArray, jpObject}},
argSpec{types: []jpType{jpString, jpArray, jpObject}},
},
handler: jpfLength,
},
"starts_with": {
"starts_with": functionEntry{
name: "starts_with",
arguments: []argSpec{
{types: []jpType{jpString}},
{types: []jpType{jpString}},
argSpec{types: []jpType{jpString}},
argSpec{types: []jpType{jpString}},
},
handler: jpfStartsWith,
},
"abs": {
"abs": functionEntry{
name: "abs",
arguments: []argSpec{
{types: []jpType{jpNumber}},
argSpec{types: []jpType{jpNumber}},
},
handler: jpfAbs,
},
"avg": {
"avg": functionEntry{
name: "avg",
arguments: []argSpec{
{types: []jpType{jpArrayNumber}},
argSpec{types: []jpType{jpArrayNumber}},
},
handler: jpfAvg,
},
"ceil": {
"ceil": functionEntry{
name: "ceil",
arguments: []argSpec{
{types: []jpType{jpNumber}},
argSpec{types: []jpType{jpNumber}},
},
handler: jpfCeil,
},
"contains": {
"contains": functionEntry{
name: "contains",
arguments: []argSpec{
{types: []jpType{jpArray, jpString}},
{types: []jpType{jpAny}},
argSpec{types: []jpType{jpArray, jpString}},
argSpec{types: []jpType{jpAny}},
},
handler: jpfContains,
},
"ends_with": {
"ends_with": functionEntry{
name: "ends_with",
arguments: []argSpec{
{types: []jpType{jpString}},
{types: []jpType{jpString}},
argSpec{types: []jpType{jpString}},
argSpec{types: []jpType{jpString}},
},
handler: jpfEndsWith,
},
"floor": {
"floor": functionEntry{
name: "floor",
arguments: []argSpec{
{types: []jpType{jpNumber}},
argSpec{types: []jpType{jpNumber}},
},
handler: jpfFloor,
},
"map": {
"map": functionEntry{
name: "amp",
arguments: []argSpec{
{types: []jpType{jpExpref}},
{types: []jpType{jpArray}},
argSpec{types: []jpType{jpExpref}},
argSpec{types: []jpType{jpArray}},
},
handler: jpfMap,
hasExpRef: true,
},
"max": {
"max": functionEntry{
name: "max",
arguments: []argSpec{
{types: []jpType{jpArrayNumber, jpArrayString}},
argSpec{types: []jpType{jpArrayNumber, jpArrayString}},
},
handler: jpfMax,
},
"merge": {
"merge": functionEntry{
name: "merge",
arguments: []argSpec{
{types: []jpType{jpObject}, variadic: true},
argSpec{types: []jpType{jpObject}, variadic: true},
},
handler: jpfMerge,
},
"max_by": {
"max_by": functionEntry{
name: "max_by",
arguments: []argSpec{
{types: []jpType{jpArray}},
{types: []jpType{jpExpref}},
argSpec{types: []jpType{jpArray}},
argSpec{types: []jpType{jpExpref}},
},
handler: jpfMaxBy,
hasExpRef: true,
},
"sum": {
"sum": functionEntry{
name: "sum",
arguments: []argSpec{
{types: []jpType{jpArrayNumber}},
argSpec{types: []jpType{jpArrayNumber}},
},
handler: jpfSum,
},
"min": {
"min": functionEntry{
name: "min",
arguments: []argSpec{
{types: []jpType{jpArrayNumber, jpArrayString}},
argSpec{types: []jpType{jpArrayNumber, jpArrayString}},
},
handler: jpfMin,
},
"min_by": {
"min_by": functionEntry{
name: "min_by",
arguments: []argSpec{
{types: []jpType{jpArray}},
{types: []jpType{jpExpref}},
argSpec{types: []jpType{jpArray}},
argSpec{types: []jpType{jpExpref}},
},
handler: jpfMinBy,
hasExpRef: true,
},
"type": {
"type": functionEntry{
name: "type",
arguments: []argSpec{
{types: []jpType{jpAny}},
argSpec{types: []jpType{jpAny}},
},
handler: jpfType,
},
"keys": {
"keys": functionEntry{
name: "keys",
arguments: []argSpec{
{types: []jpType{jpObject}},
argSpec{types: []jpType{jpObject}},
},
handler: jpfKeys,
},
"values": {
"values": functionEntry{
name: "values",
arguments: []argSpec{
{types: []jpType{jpObject}},
argSpec{types: []jpType{jpObject}},
},
handler: jpfValues,
},
"sort": {
"sort": functionEntry{
name: "sort",
arguments: []argSpec{
{types: []jpType{jpArrayString, jpArrayNumber}},
argSpec{types: []jpType{jpArrayString, jpArrayNumber}},
},
handler: jpfSort,
},
"sort_by": {
"sort_by": functionEntry{
name: "sort_by",
arguments: []argSpec{
{types: []jpType{jpArray}},
{types: []jpType{jpExpref}},
argSpec{types: []jpType{jpArray}},
argSpec{types: []jpType{jpExpref}},
},
handler: jpfSortBy,
hasExpRef: true,
},
"join": {
"join": functionEntry{
name: "join",
arguments: []argSpec{
{types: []jpType{jpString}},
{types: []jpType{jpArrayString}},
argSpec{types: []jpType{jpString}},
argSpec{types: []jpType{jpArrayString}},
},
handler: jpfJoin,
},
"reverse": {
"reverse": functionEntry{
name: "reverse",
arguments: []argSpec{
{types: []jpType{jpArray, jpString}},
argSpec{types: []jpType{jpArray, jpString}},
},
handler: jpfReverse,
},
"to_array": {
"to_array": functionEntry{
name: "to_array",
arguments: []argSpec{
{types: []jpType{jpAny}},
argSpec{types: []jpType{jpAny}},
},
handler: jpfToArray,
},
"to_string": {
"to_string": functionEntry{
name: "to_string",
arguments: []argSpec{
{types: []jpType{jpAny}},
argSpec{types: []jpType{jpAny}},
},
handler: jpfToString,
},
"to_number": {
"to_number": functionEntry{
name: "to_number",
arguments: []argSpec{
{types: []jpType{jpAny}},
argSpec{types: []jpType{jpAny}},
},
handler: jpfToNumber,
},
"not_null": {
"not_null": functionEntry{
name: "not_null",
arguments: []argSpec{
{types: []jpType{jpAny}, variadic: true},
argSpec{types: []jpType{jpAny}, variadic: true},
},
handler: jpfNotNull,
},
@ -358,7 +357,7 @@ func (a *argSpec) typeCheck(arg interface{}) error {
return nil
}
case jpArray:
if isSliceType(arg) {
if _, ok := arg.([]interface{}); ok {
return nil
}
case jpObject:
@ -410,9 +409,8 @@ func jpfLength(arguments []interface{}) (interface{}, error) {
arg := arguments[0]
if c, ok := arg.(string); ok {
return float64(utf8.RuneCountInString(c)), nil
} else if isSliceType(arg) {
v := reflect.ValueOf(arg)
return float64(v.Len()), nil
} else if c, ok := arg.([]interface{}); ok {
return float64(len(c)), nil
} else if c, ok := arg.(map[string]interface{}); ok {
return float64(len(c)), nil
}

View File

@ -69,7 +69,7 @@ func TestCanSupportUserDefinedStructsRef(t *testing.T) {
func TestCanSupportStructWithSliceAll(t *testing.T) {
assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}}
result, err := Search("B[].Foo", data)
assert.Nil(err)
assert.Equal([]interface{}{"f1", "correct"}, result)
@ -77,7 +77,7 @@ func TestCanSupportStructWithSliceAll(t *testing.T) {
func TestCanSupportStructWithSlicingExpression(t *testing.T) {
assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}}
result, err := Search("B[:].Foo", data)
assert.Nil(err)
assert.Equal([]interface{}{"f1", "correct"}, result)
@ -85,7 +85,7 @@ func TestCanSupportStructWithSlicingExpression(t *testing.T) {
func TestCanSupportStructWithFilterProjection(t *testing.T) {
assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}}
result, err := Search("B[? `true` ].Foo", data)
assert.Nil(err)
assert.Equal([]interface{}{"f1", "correct"}, result)
@ -93,7 +93,7 @@ func TestCanSupportStructWithFilterProjection(t *testing.T) {
func TestCanSupportStructWithSlice(t *testing.T) {
assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}}
result, err := Search("B[-1].Foo", data)
assert.Nil(err)
assert.Equal("correct", result)
@ -109,7 +109,7 @@ func TestCanSupportStructWithOrExpressions(t *testing.T) {
func TestCanSupportStructWithSlicePointer(t *testing.T) {
assert := assert.New(t)
data := sliceType{A: "foo", C: []*scalars{{"f1", "b1"}, {"correct", "b2"}}}
data := sliceType{A: "foo", C: []*scalars{&scalars{"f1", "b1"}, &scalars{"correct", "b2"}}}
result, err := Search("C[-1].Foo", data)
assert.Nil(err)
assert.Equal("correct", result)
@ -128,7 +128,7 @@ func TestWillAutomaticallyCapitalizeFieldNames(t *testing.T) {
func TestCanSupportStructWithSliceLowerCased(t *testing.T) {
assert := assert.New(t)
data := sliceType{A: "foo", B: []scalars{{"f1", "b1"}, {"correct", "b2"}}}
data := sliceType{A: "foo", B: []scalars{scalars{"f1", "b1"}, scalars{"correct", "b2"}}}
result, err := Search("b[-1].foo", data)
assert.Nil(err)
assert.Equal("correct", result)
@ -173,14 +173,6 @@ func TestCanSupportProjectionsWithStructs(t *testing.T) {
assert.Equal([]interface{}{"first", "second", "third"}, result)
}
func TestCanSupportSliceOfStructsWithFunctions(t *testing.T) {
assert := assert.New(t)
data := []scalars{scalars{"a1", "b1"}, scalars{"a2", "b2"}}
result, err := Search("length(@)", data)
assert.Nil(err)
assert.Equal(result.(float64), 2.0)
}
func BenchmarkInterpretSingleFieldStruct(b *testing.B) {
intr := newInterpreter()
parser := NewParser()

View File

@ -11,63 +11,63 @@ var lexingTests = []struct {
expression string
expected []token
}{
{"*", []token{{tStar, "*", 0, 1}}},
{".", []token{{tDot, ".", 0, 1}}},
{"[?", []token{{tFilter, "[?", 0, 2}}},
{"[]", []token{{tFlatten, "[]", 0, 2}}},
{"(", []token{{tLparen, "(", 0, 1}}},
{")", []token{{tRparen, ")", 0, 1}}},
{"[", []token{{tLbracket, "[", 0, 1}}},
{"]", []token{{tRbracket, "]", 0, 1}}},
{"{", []token{{tLbrace, "{", 0, 1}}},
{"}", []token{{tRbrace, "}", 0, 1}}},
{"||", []token{{tOr, "||", 0, 2}}},
{"|", []token{{tPipe, "|", 0, 1}}},
{"29", []token{{tNumber, "29", 0, 2}}},
{"2", []token{{tNumber, "2", 0, 1}}},
{"0", []token{{tNumber, "0", 0, 1}}},
{"-20", []token{{tNumber, "-20", 0, 3}}},
{"foo", []token{{tUnquotedIdentifier, "foo", 0, 3}}},
{`"bar"`, []token{{tQuotedIdentifier, "bar", 0, 3}}},
{"*", []token{token{tStar, "*", 0, 1}}},
{".", []token{token{tDot, ".", 0, 1}}},
{"[?", []token{token{tFilter, "[?", 0, 2}}},
{"[]", []token{token{tFlatten, "[]", 0, 2}}},
{"(", []token{token{tLparen, "(", 0, 1}}},
{")", []token{token{tRparen, ")", 0, 1}}},
{"[", []token{token{tLbracket, "[", 0, 1}}},
{"]", []token{token{tRbracket, "]", 0, 1}}},
{"{", []token{token{tLbrace, "{", 0, 1}}},
{"}", []token{token{tRbrace, "}", 0, 1}}},
{"||", []token{token{tOr, "||", 0, 2}}},
{"|", []token{token{tPipe, "|", 0, 1}}},
{"29", []token{token{tNumber, "29", 0, 2}}},
{"2", []token{token{tNumber, "2", 0, 1}}},
{"0", []token{token{tNumber, "0", 0, 1}}},
{"-20", []token{token{tNumber, "-20", 0, 3}}},
{"foo", []token{token{tUnquotedIdentifier, "foo", 0, 3}}},
{`"bar"`, []token{token{tQuotedIdentifier, "bar", 0, 3}}},
// Escaping the delimiter
{`"bar\"baz"`, []token{{tQuotedIdentifier, `bar"baz`, 0, 7}}},
{",", []token{{tComma, ",", 0, 1}}},
{":", []token{{tColon, ":", 0, 1}}},
{"<", []token{{tLT, "<", 0, 1}}},
{"<=", []token{{tLTE, "<=", 0, 2}}},
{">", []token{{tGT, ">", 0, 1}}},
{">=", []token{{tGTE, ">=", 0, 2}}},
{"==", []token{{tEQ, "==", 0, 2}}},
{"!=", []token{{tNE, "!=", 0, 2}}},
{"`[0, 1, 2]`", []token{{tJSONLiteral, "[0, 1, 2]", 1, 9}}},
{"'foo'", []token{{tStringLiteral, "foo", 1, 3}}},
{"'a'", []token{{tStringLiteral, "a", 1, 1}}},
{`'foo\'bar'`, []token{{tStringLiteral, "foo'bar", 1, 7}}},
{"@", []token{{tCurrent, "@", 0, 1}}},
{"&", []token{{tExpref, "&", 0, 1}}},
{`"bar\"baz"`, []token{token{tQuotedIdentifier, `bar"baz`, 0, 7}}},
{",", []token{token{tComma, ",", 0, 1}}},
{":", []token{token{tColon, ":", 0, 1}}},
{"<", []token{token{tLT, "<", 0, 1}}},
{"<=", []token{token{tLTE, "<=", 0, 2}}},
{">", []token{token{tGT, ">", 0, 1}}},
{">=", []token{token{tGTE, ">=", 0, 2}}},
{"==", []token{token{tEQ, "==", 0, 2}}},
{"!=", []token{token{tNE, "!=", 0, 2}}},
{"`[0, 1, 2]`", []token{token{tJSONLiteral, "[0, 1, 2]", 1, 9}}},
{"'foo'", []token{token{tStringLiteral, "foo", 1, 3}}},
{"'a'", []token{token{tStringLiteral, "a", 1, 1}}},
{`'foo\'bar'`, []token{token{tStringLiteral, "foo'bar", 1, 7}}},
{"@", []token{token{tCurrent, "@", 0, 1}}},
{"&", []token{token{tExpref, "&", 0, 1}}},
// Quoted identifier unicode escape sequences
{`"\u2713"`, []token{{tQuotedIdentifier, "✓", 0, 3}}},
{`"\\"`, []token{{tQuotedIdentifier, `\`, 0, 1}}},
{"`\"foo\"`", []token{{tJSONLiteral, "\"foo\"", 1, 5}}},
{`"\u2713"`, []token{token{tQuotedIdentifier, "✓", 0, 3}}},
{`"\\"`, []token{token{tQuotedIdentifier, `\`, 0, 1}}},
{"`\"foo\"`", []token{token{tJSONLiteral, "\"foo\"", 1, 5}}},
// Combinations of tokens.
{"foo.bar", []token{
{tUnquotedIdentifier, "foo", 0, 3},
{tDot, ".", 3, 1},
{tUnquotedIdentifier, "bar", 4, 3},
token{tUnquotedIdentifier, "foo", 0, 3},
token{tDot, ".", 3, 1},
token{tUnquotedIdentifier, "bar", 4, 3},
}},
{"foo[0]", []token{
{tUnquotedIdentifier, "foo", 0, 3},
{tLbracket, "[", 3, 1},
{tNumber, "0", 4, 1},
{tRbracket, "]", 5, 1},
token{tUnquotedIdentifier, "foo", 0, 3},
token{tLbracket, "[", 3, 1},
token{tNumber, "0", 4, 1},
token{tRbracket, "]", 5, 1},
}},
{"foo[?a<b]", []token{
{tUnquotedIdentifier, "foo", 0, 3},
{tFilter, "[?", 3, 2},
{tUnquotedIdentifier, "a", 5, 1},
{tLT, "<", 6, 1},
{tUnquotedIdentifier, "b", 7, 1},
{tRbracket, "]", 8, 1},
token{tUnquotedIdentifier, "foo", 0, 3},
token{tFilter, "[?", 3, 2},
token{tUnquotedIdentifier, "a", 5, 1},
token{tLT, "<", 6, 1},
token{tUnquotedIdentifier, "b", 7, 1},
token{tRbracket, "]", 8, 1},
}},
}

View File

@ -353,7 +353,7 @@ func (p *Parser) nud(token token) (ASTNode, error) {
case tFlatten:
left := ASTNode{
nodeType: ASTFlatten,
children: []ASTNode{{nodeType: ASTIdentity}},
children: []ASTNode{ASTNode{nodeType: ASTIdentity}},
}
right, err := p.parseProjectionRHS(bindingPowers[tFlatten])
if err != nil {
@ -378,7 +378,7 @@ func (p *Parser) nud(token token) (ASTNode, error) {
}
return ASTNode{
nodeType: ASTProjection,
children: []ASTNode{{nodeType: ASTIdentity}, right},
children: []ASTNode{ASTNode{nodeType: ASTIdentity}, right},
}, nil
} else {
return p.parseMultiSelectList()

View File

@ -13,7 +13,7 @@ func TestSlicePositiveStep(t *testing.T) {
input[2] = 2
input[3] = 3
input[4] = 4
result, err := slice(input, []sliceParam{{0, true}, {3, true}, {1, true}})
result, err := slice(input, []sliceParam{sliceParam{0, true}, sliceParam{3, true}, sliceParam{1, true}})
assert.Nil(err)
assert.Equal(input[:3], result)
}

View File

@ -57,7 +57,7 @@ func (h *extensionHandlerClient) Send(hType mint.HandshakeType, el *mint.Extensi
func (h *extensionHandlerClient) Receive(hType mint.HandshakeType, el *mint.ExtensionList) error {
ext := &tlsExtensionBody{}
found, _ := el.Find(ext)
found := el.Find(ext)
if hType != mint.HandshakeTypeEncryptedExtensions && hType != mint.HandshakeTypeNewSessionTicket {
if found {

View File

@ -66,7 +66,7 @@ func (h *extensionHandlerServer) Send(hType mint.HandshakeType, el *mint.Extensi
func (h *extensionHandlerServer) Receive(hType mint.HandshakeType, el *mint.ExtensionList) error {
ext := &tlsExtensionBody{}
found, _ := el.Find(ext)
found := el.Find(ext)
if hType != mint.HandshakeTypeClientHello {
if found {

1654
vendor/github.com/magefile/mage/build/build.go generated vendored Normal file

File diff suppressed because it is too large Load Diff

446
vendor/github.com/magefile/mage/build/build_test.go generated vendored Normal file
View File

@ -0,0 +1,446 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"io"
"os"
"path/filepath"
"reflect"
"runtime"
"strings"
"testing"
)
func TestMatch(t *testing.T) {
ctxt := Default
what := "default"
match := func(tag string, want map[string]bool) {
m := make(map[string]bool)
if !ctxt.match(tag, m) {
t.Errorf("%s context should match %s, does not", what, tag)
}
if !reflect.DeepEqual(m, want) {
t.Errorf("%s tags = %v, want %v", tag, m, want)
}
}
nomatch := func(tag string, want map[string]bool) {
m := make(map[string]bool)
if ctxt.match(tag, m) {
t.Errorf("%s context should NOT match %s, does", what, tag)
}
if !reflect.DeepEqual(m, want) {
t.Errorf("%s tags = %v, want %v", tag, m, want)
}
}
match(runtime.GOOS+","+runtime.GOARCH, map[string]bool{runtime.GOOS: true, runtime.GOARCH: true})
match(runtime.GOOS+","+runtime.GOARCH+",!foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true})
nomatch(runtime.GOOS+","+runtime.GOARCH+",foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true})
what = "modified"
ctxt.BuildTags = []string{"foo"}
match(runtime.GOOS+","+runtime.GOARCH, map[string]bool{runtime.GOOS: true, runtime.GOARCH: true})
match(runtime.GOOS+","+runtime.GOARCH+",foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true})
nomatch(runtime.GOOS+","+runtime.GOARCH+",!foo", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "foo": true})
match(runtime.GOOS+","+runtime.GOARCH+",!bar", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "bar": true})
nomatch(runtime.GOOS+","+runtime.GOARCH+",bar", map[string]bool{runtime.GOOS: true, runtime.GOARCH: true, "bar": true})
nomatch("!", map[string]bool{})
}
func TestDotSlashImport(t *testing.T) {
p, err := ImportDir("testdata/other", 0)
if err != nil {
t.Fatal(err)
}
if len(p.Imports) != 1 || p.Imports[0] != "./file" {
t.Fatalf("testdata/other: Imports=%v, want [./file]", p.Imports)
}
p1, err := Import("./file", "testdata/other", 0)
if err != nil {
t.Fatal(err)
}
if p1.Name != "file" {
t.Fatalf("./file: Name=%q, want %q", p1.Name, "file")
}
dir := filepath.Clean("testdata/other/file") // Clean to use \ on Windows
if p1.Dir != dir {
t.Fatalf("./file: Dir=%q, want %q", p1.Name, dir)
}
}
func TestEmptyImport(t *testing.T) {
p, err := Import("", Default.GOROOT, FindOnly)
if err == nil {
t.Fatal(`Import("") returned nil error.`)
}
if p == nil {
t.Fatal(`Import("") returned nil package.`)
}
if p.ImportPath != "" {
t.Fatalf("ImportPath=%q, want %q.", p.ImportPath, "")
}
}
func TestEmptyFolderImport(t *testing.T) {
_, err := Import(".", "testdata/empty", 0)
if _, ok := err.(*NoGoError); !ok {
t.Fatal(`Import("testdata/empty") did not return NoGoError.`)
}
}
func TestMultiplePackageImport(t *testing.T) {
_, err := Import(".", "testdata/multi", 0)
mpe, ok := err.(*MultiplePackageError)
if !ok {
t.Fatal(`Import("testdata/multi") did not return MultiplePackageError.`)
}
want := &MultiplePackageError{
Dir: filepath.FromSlash("testdata/multi"),
Packages: []string{"main", "test_package"},
Files: []string{"file.go", "file_appengine.go"},
}
if !reflect.DeepEqual(mpe, want) {
t.Errorf("got %#v; want %#v", mpe, want)
}
}
func TestLocalDirectory(t *testing.T) {
if runtime.GOOS == "darwin" {
switch runtime.GOARCH {
case "arm", "arm64":
t.Skipf("skipping on %s/%s, no valid GOROOT", runtime.GOOS, runtime.GOARCH)
}
}
cwd, err := os.Getwd()
if err != nil {
t.Fatal(err)
}
p, err := ImportDir(cwd, 0)
if err != nil {
t.Fatal(err)
}
if p.ImportPath != "github.com/magefile/mage/build" {
t.Fatalf("ImportPath=%q, want %q", p.ImportPath, "github.com/magefile/mage/build")
}
}
func TestShouldBuild(t *testing.T) {
const file1 = "// +build tag1\n\n" +
"package main\n"
want1 := map[string]bool{"tag1": true}
const file2 = "// +build cgo\n\n" +
"// This package implements parsing of tags like\n" +
"// +build tag1\n" +
"package build"
want2 := map[string]bool{"cgo": true}
const file3 = "// Copyright The Go Authors.\n\n" +
"package build\n\n" +
"// shouldBuild checks tags given by lines of the form\n" +
"// +build tag\n" +
"func shouldBuild(content []byte)\n"
want3 := map[string]bool{}
ctx := &Context{BuildTags: []string{"tag1"}}
m := map[string]bool{}
if !ctx.shouldBuild([]byte(file1), m, nil) {
t.Errorf("shouldBuild(file1) = false, want true")
}
if !reflect.DeepEqual(m, want1) {
t.Errorf("shouldBuild(file1) tags = %v, want %v", m, want1)
}
m = map[string]bool{}
if ctx.shouldBuild([]byte(file2), m, nil) {
t.Errorf("shouldBuild(file2) = true, want false")
}
if !reflect.DeepEqual(m, want2) {
t.Errorf("shouldBuild(file2) tags = %v, want %v", m, want2)
}
m = map[string]bool{}
ctx = &Context{BuildTags: nil}
if !ctx.shouldBuild([]byte(file3), m, nil) {
t.Errorf("shouldBuild(file3) = false, want true")
}
if !reflect.DeepEqual(m, want3) {
t.Errorf("shouldBuild(file3) tags = %v, want %v", m, want3)
}
}
func TestRequiredTags(t *testing.T) {
tests := []struct {
name string
file string
should bool
}{
{
"no req tag",
"package main",
false,
},
{
"has req tag",
`// +build req
package main`,
true,
},
{
"OR with req",
`// +build no req
package main`,
true,
},
}
ctx := &Context{
BuildTags: []string{"req"},
RequiredTags: []string{"req"},
}
for _, tst := range tests {
t.Run(tst.name, func(t *testing.T) {
if tst.should != ctx.shouldBuild([]byte(tst.file), nil, nil) {
t.Errorf("shouldBuild = %v, want %v", !tst.should, tst.should)
}
})
}
}
type readNopCloser struct {
io.Reader
}
func (r readNopCloser) Close() error {
return nil
}
var (
ctxtP9 = Context{GOARCH: "arm", GOOS: "plan9"}
ctxtAndroid = Context{GOARCH: "arm", GOOS: "android"}
)
var matchFileTests = []struct {
ctxt Context
name string
data string
match bool
}{
{ctxtP9, "foo_arm.go", "", true},
{ctxtP9, "foo1_arm.go", "// +build linux\n\npackage main\n", false},
{ctxtP9, "foo_darwin.go", "", false},
{ctxtP9, "foo.go", "", true},
{ctxtP9, "foo1.go", "// +build linux\n\npackage main\n", false},
{ctxtP9, "foo.badsuffix", "", false},
{ctxtAndroid, "foo_linux.go", "", true},
{ctxtAndroid, "foo_android.go", "", true},
{ctxtAndroid, "foo_plan9.go", "", false},
{ctxtAndroid, "android.go", "", true},
{ctxtAndroid, "plan9.go", "", true},
{ctxtAndroid, "plan9_test.go", "", true},
{ctxtAndroid, "arm.s", "", true},
{ctxtAndroid, "amd64.s", "", true},
}
func TestMatchFile(t *testing.T) {
for _, tt := range matchFileTests {
ctxt := tt.ctxt
ctxt.OpenFile = func(path string) (r io.ReadCloser, err error) {
if path != "x+"+tt.name {
t.Fatalf("OpenFile asked for %q, expected %q", path, "x+"+tt.name)
}
return &readNopCloser{strings.NewReader(tt.data)}, nil
}
ctxt.JoinPath = func(elem ...string) string {
return strings.Join(elem, "+")
}
match, err := ctxt.MatchFile("x", tt.name)
if match != tt.match || err != nil {
t.Fatalf("MatchFile(%q) = %v, %v, want %v, nil", tt.name, match, err, tt.match)
}
}
}
func TestImportCmd(t *testing.T) {
if runtime.GOOS == "darwin" {
switch runtime.GOARCH {
case "arm", "arm64":
t.Skipf("skipping on %s/%s, no valid GOROOT", runtime.GOOS, runtime.GOARCH)
}
}
p, err := Import("cmd/internal/objfile", "", 0)
if err != nil {
t.Fatal(err)
}
if !strings.HasSuffix(filepath.ToSlash(p.Dir), "src/cmd/internal/objfile") {
t.Fatalf("Import cmd/internal/objfile returned Dir=%q, want %q", filepath.ToSlash(p.Dir), ".../src/cmd/internal/objfile")
}
}
var (
expandSrcDirPath = filepath.Join(string(filepath.Separator)+"projects", "src", "add")
)
// expandSrcDirTests drives TestExpandSrcDir. Only the exact token
// ${SRCDIR} (with braces) is substituted; lone dollars, unmatched
// braces, other variable names, and $SRCDIR without braces must all
// pass through unchanged.
var expandSrcDirTests = []struct {
	input, expected string
}{
	{"-L ${SRCDIR}/libs -ladd", "-L /projects/src/add/libs -ladd"},
	{"${SRCDIR}/add_linux_386.a -pthread -lstdc++", "/projects/src/add/add_linux_386.a -pthread -lstdc++"},
	{"Nothing to expand here!", "Nothing to expand here!"},
	{"$", "$"},
	{"$$", "$$"},
	{"${", "${"},
	{"$}", "$}"},
	{"$FOO ${BAR}", "$FOO ${BAR}"},
	{"Find me the $SRCDIRECTORY.", "Find me the $SRCDIRECTORY."},
	{"$SRCDIR is missing braces", "$SRCDIR is missing braces"},
}
// TestExpandSrcDir checks ${SRCDIR} substitution against the table above,
// ignoring the shell-safety flag (covered by TestShellSafety).
func TestExpandSrcDir(t *testing.T) {
	for _, test := range expandSrcDirTests {
		got, _ := expandSrcDir(test.input, expandSrcDirPath)
		if got != test.expected {
			t.Errorf("%q expands to %q with SRCDIR=%q when %q is expected", test.input, got, expandSrcDirPath, test.expected)
			continue
		}
		t.Logf("%q expands to %q with SRCDIR=%q", test.input, got, expandSrcDirPath)
	}
}
// TestShellSafety checks both return values of expandSrcDir: the expanded
// string and the flag reporting whether the result is safe to pass to a shell.
func TestShellSafety(t *testing.T) {
	tests := []struct {
		input, srcdir, expected string
		result                  bool
	}{
		{"-I${SRCDIR}/../include", "/projects/src/issue 11868", "-I/projects/src/issue 11868/../include", true},
		{"-I${SRCDIR}", "wtf$@%", "-Iwtf$@%", true},
		{"-X${SRCDIR}/1,${SRCDIR}/2", "/projects/src/issue 11868", "-X/projects/src/issue 11868/1,/projects/src/issue 11868/2", true},
		{"-I/tmp -I/tmp", "/tmp2", "-I/tmp -I/tmp", true},
		{"-I/tmp", "/tmp/[0]", "-I/tmp", true},
		{"-I${SRCDIR}/dir", "/tmp/[0]", "-I/tmp/[0]/dir", false},
		{"-I${SRCDIR}/dir", "/tmp/go go", "-I/tmp/go go/dir", true},
		{"-I${SRCDIR}/dir dir", "/tmp/go", "-I/tmp/go/dir dir", true},
	}
	for _, tc := range tests {
		got, ok := expandSrcDir(tc.input, tc.srcdir)
		if ok != tc.result {
			t.Errorf("Expected %t while %q expands to %q with SRCDIR=%q; got %t", tc.result, tc.input, got, tc.srcdir, ok)
		}
		if got != tc.expected {
			t.Errorf("Expected %q while %q expands with SRCDIR=%q; got %q", tc.expected, tc.input, tc.srcdir, got)
		}
	}
}
// Want to get a "cannot find package" error when directory for package does not exist.
// There should be valid partial information in the returned non-nil *Package.
func TestImportDirNotExist(t *testing.T) {
	MustHaveGoBuild(t) // really must just have source
	ctxt := Default
	ctxt.GOPATH = ""
	gorootSrc := filepath.Join(ctxt.GOROOT, "src/go/build")
	tests := []struct {
		label        string
		path, srcDir string
		mode         ImportMode
	}{
		{"Import(full, 0)", "go/build/doesnotexist", "", 0},
		{"Import(local, 0)", "./doesnotexist", gorootSrc, 0},
		{"Import(full, FindOnly)", "go/build/doesnotexist", "", FindOnly},
		{"Import(local, FindOnly)", "./doesnotexist", gorootSrc, FindOnly},
	}
	for _, test := range tests {
		p, err := ctxt.Import(test.path, test.srcDir, test.mode)
		if err == nil || !strings.HasPrefix(err.Error(), "cannot find package") {
			t.Errorf(`%s got error: %q, want "cannot find package" error`, test.label, err)
		}
		// If an error occurs, build.Import is documented to return
		// a non-nil *Package containing partial information.
		if p == nil {
			t.Fatalf(`%s got nil p, want non-nil *Package`, test.label)
		}
		// Verify partial information in p.
		if p.ImportPath != "go/build/doesnotexist" {
			t.Errorf(`%s got p.ImportPath: %q, want "go/build/doesnotexist"`, test.label, p.ImportPath)
		}
	}
}
// TestImportVendor checks that an import from inside net/http resolves
// through GOROOT/src/vendor.
func TestImportVendor(t *testing.T) {
	MustHaveGoBuild(t) // really must just have source
	ctxt := Default
	ctxt.GOPATH = ""
	const want = "vendor/golang_org/x/net/http2/hpack"
	p, err := ctxt.Import("golang_org/x/net/http2/hpack", filepath.Join(ctxt.GOROOT, "src/net/http"), 0)
	if err != nil {
		t.Fatalf("cannot find vendored golang_org/x/net/http2/hpack from net/http directory: %v", err)
	}
	if p.ImportPath != want {
		t.Fatalf("Import succeeded but found %q, want %q", p.ImportPath, want)
	}
}
// TestImportVendorFailure checks that a nonexistent package fails with an
// error message mentioning the vendor tree that was searched.
func TestImportVendorFailure(t *testing.T) {
	MustHaveGoBuild(t) // really must just have source
	ctxt := Default
	ctxt.GOPATH = ""
	srcDir := filepath.Join(ctxt.GOROOT, "src/net/http")
	p, err := ctxt.Import("x.com/y/z", srcDir, 0)
	if err == nil {
		t.Fatalf("found made-up package x.com/y/z in %s", p.Dir)
	}
	if msg := err.Error(); !strings.Contains(msg, " (vendor tree)") {
		t.Fatalf("error on failed import does not mention GOROOT/src/vendor directory:\n%s", msg)
	}
}
// TestImportVendorParentFailure checks that a vendored directory with no
// Go source files is rejected rather than silently used.
func TestImportVendorParentFailure(t *testing.T) {
	MustHaveGoBuild(t) // really must just have source
	ctxt := Default
	ctxt.GOPATH = ""
	// This import should fail because the vendor/golang.org/x/net/http2 directory has no source code.
	p, err := ctxt.Import("golang_org/x/net/http2", filepath.Join(ctxt.GOROOT, "src/net/http"), 0)
	if err == nil {
		t.Fatalf("found empty parent in %s", p.Dir)
	}
	if p != nil && p.Dir != "" {
		t.Fatalf("decided to use %s", p.Dir)
	}
	if msg := err.Error(); !strings.Contains(msg, " (vendor tree)") {
		t.Fatalf("error on failed import does not mention GOROOT/src/vendor directory:\n%s", msg)
	}
}
// HasGoBuild reports whether the current system can build programs with ``go build''
// and then run them with os.StartProcess or exec.Command.
func HasGoBuild() bool {
switch runtime.GOOS {
case "android", "nacl":
return false
case "darwin":
if strings.HasPrefix(runtime.GOARCH, "arm") {
return false
}
}
return true
}
// MustHaveGoBuild checks that the current system can build programs with ``go build''
// and then run them with os.StartProcess or exec.Command.
// If not, MustHaveGoBuild calls t.Skip with an explanation.
func MustHaveGoBuild(t *testing.T) {
	if HasGoBuild() {
		return
	}
	t.Skipf("skipping test: 'go build' not available on %s/%s", runtime.GOOS, runtime.GOARCH)
}

556
vendor/github.com/magefile/mage/build/deps_test.go generated vendored Normal file
View File

@ -0,0 +1,556 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file exercises the import parser but also checks that
// some low-level packages do not have new dependencies added.
package build
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"strconv"
"strings"
)
// pkgDeps defines the expected dependencies between packages in
// the Go source tree. It is a statement of policy.
// Changes should not be made to this map without prior discussion.
//
// The map contains two kinds of entries:
// 1) Lower-case keys are standard import paths and list the
// allowed imports in that package.
// 2) Upper-case keys define aliases for package sets, which can then
// be used as dependencies by other rules.
//
// DO NOT CHANGE THIS DATA TO FIX BUILDS.
//
// NOTE(review): vendored copy of the policy table from go/src/go/build/deps_test.go;
// keep byte-for-byte in sync with the upstream Go release this was taken from.
var pkgDeps = map[string][]string{
	// L0 is the lowest level, core, nearly unavoidable packages.
	"errors": {},
	"io": {"errors", "sync"},
	"runtime": {"unsafe", "runtime/internal/atomic", "runtime/internal/sys"},
	"runtime/internal/sys": {},
	"runtime/internal/atomic": {"unsafe", "runtime/internal/sys"},
	"internal/race": {"runtime", "unsafe"},
	"sync": {"internal/race", "runtime", "sync/atomic", "unsafe"},
	"sync/atomic": {"unsafe"},
	"unsafe": {},
	"internal/cpu": {"runtime"},
	"L0": {
		"errors",
		"io",
		"runtime",
		"runtime/internal/atomic",
		"sync",
		"sync/atomic",
		"unsafe",
		"internal/cpu",
	},
	// L1 adds simple functions and strings processing,
	// but not Unicode tables.
	"math": {"internal/cpu", "unsafe"},
	"math/bits": {},
	"math/cmplx": {"math"},
	"math/rand": {"L0", "math"},
	"strconv": {"L0", "unicode/utf8", "math"},
	"unicode/utf16": {},
	"unicode/utf8": {},
	"L1": {
		"L0",
		"math",
		"math/bits",
		"math/cmplx",
		"math/rand",
		"sort",
		"strconv",
		"unicode/utf16",
		"unicode/utf8",
	},
	// L2 adds Unicode and strings processing.
	"bufio": {"L0", "unicode/utf8", "bytes"},
	"bytes": {"L0", "unicode", "unicode/utf8"},
	"path": {"L0", "unicode/utf8", "strings"},
	"strings": {"L0", "unicode", "unicode/utf8"},
	"unicode": {},
	"L2": {
		"L1",
		"bufio",
		"bytes",
		"path",
		"strings",
		"unicode",
	},
	// L3 adds reflection and some basic utility packages
	// and interface definitions, but nothing that makes
	// system calls.
	"crypto": {"L2", "hash"}, // interfaces
	"crypto/cipher": {"L2", "crypto/subtle"},
	"crypto/subtle": {},
	"encoding/base32": {"L2"},
	"encoding/base64": {"L2"},
	"encoding/binary": {"L2", "reflect"},
	"hash": {"L2"}, // interfaces
	"hash/adler32": {"L2", "hash"},
	"hash/crc32": {"L2", "hash"},
	"hash/crc64": {"L2", "hash"},
	"hash/fnv": {"L2", "hash"},
	"image": {"L2", "image/color"}, // interfaces
	"image/color": {"L2"}, // interfaces
	"image/color/palette": {"L2", "image/color"},
	"reflect": {"L2"},
	"sort": {"reflect"},
	"L3": {
		"L2",
		"crypto",
		"crypto/cipher",
		"crypto/internal/cipherhw",
		"crypto/subtle",
		"encoding/base32",
		"encoding/base64",
		"encoding/binary",
		"hash",
		"hash/adler32",
		"hash/crc32",
		"hash/crc64",
		"hash/fnv",
		"image",
		"image/color",
		"image/color/palette",
		"reflect",
	},
	// End of linear dependency definitions.
	// Operating system access.
	"syscall": {"L0", "internal/race", "internal/syscall/windows/sysdll", "unicode/utf16"},
	"internal/syscall/unix": {"L0", "syscall"},
	"internal/syscall/windows": {"L0", "syscall", "internal/syscall/windows/sysdll"},
	"internal/syscall/windows/registry": {"L0", "syscall", "internal/syscall/windows/sysdll", "unicode/utf16"},
	"time": {
		// "L0" without the "io" package:
		"errors",
		"runtime",
		"runtime/internal/atomic",
		"sync",
		"sync/atomic",
		"unsafe",
		// Other time dependencies:
		"internal/syscall/windows/registry",
		"syscall",
	},
	"internal/poll": {"L0", "internal/race", "syscall", "time", "unicode/utf16", "unicode/utf8"},
	"os": {"L1", "os", "syscall", "time", "internal/poll", "internal/syscall/windows"}, // NOTE(review): "os" lists itself; harmless to allowed(), but confirm against upstream.
	"path/filepath": {"L2", "os", "syscall"},
	"io/ioutil": {"L2", "os", "path/filepath", "time"},
	"os/exec": {"L2", "os", "context", "path/filepath", "syscall"},
	"os/signal": {"L2", "os", "syscall"},
	// OS enables basic operating system functionality,
	// but not direct use of package syscall, nor os/signal.
	"OS": {
		"io/ioutil",
		"os",
		"os/exec",
		"path/filepath",
		"time",
	},
	// Formatted I/O: few dependencies (L1) but we must add reflect.
	"fmt": {"L1", "os", "reflect"},
	"log": {"L1", "os", "fmt", "time"},
	// Packages used by testing must be low-level (L2+fmt).
	"regexp": {"L2", "regexp/syntax"},
	"regexp/syntax": {"L2"},
	"runtime/debug": {"L2", "fmt", "io/ioutil", "os", "time"},
	"runtime/pprof": {"L2", "compress/gzip", "context", "encoding/binary", "fmt", "io/ioutil", "os", "text/tabwriter", "time"},
	"runtime/trace": {"L0"},
	"text/tabwriter": {"L2"},
	"testing": {"L2", "flag", "fmt", "internal/race", "os", "runtime/debug", "runtime/pprof", "runtime/trace", "time"},
	"testing/iotest": {"L2", "log"},
	"testing/quick": {"L2", "flag", "fmt", "reflect", "time"},
	"internal/testenv": {"L2", "OS", "flag", "testing", "syscall"},
	// L4 is defined as L3+fmt+log+time, because in general once
	// you're using L3 packages, use of fmt, log, or time is not a big deal.
	"L4": {
		"L3",
		"fmt",
		"log",
		"time",
	},
	// Go parser.
	"go/ast": {"L4", "OS", "go/scanner", "go/token"},
	"go/doc": {"L4", "go/ast", "go/token", "regexp", "text/template"},
	"go/parser": {"L4", "OS", "go/ast", "go/scanner", "go/token"},
	"go/printer": {"L4", "OS", "go/ast", "go/scanner", "go/token", "text/tabwriter"},
	"go/scanner": {"L4", "OS", "go/token"},
	"go/token": {"L4"},
	"GOPARSER": {
		"go/ast",
		"go/doc",
		"go/parser",
		"go/printer",
		"go/scanner",
		"go/token",
	},
	"go/format": {"L4", "GOPARSER", "internal/format"},
	"internal/format": {"L4", "GOPARSER"},
	// Go type checking.
	"go/constant": {"L4", "go/token", "math/big"},
	"go/importer": {"L4", "go/build", "go/internal/gccgoimporter", "go/internal/gcimporter", "go/internal/srcimporter", "go/token", "go/types"},
	"go/internal/gcimporter": {"L4", "OS", "go/build", "go/constant", "go/token", "go/types", "text/scanner"},
	"go/internal/gccgoimporter": {"L4", "OS", "debug/elf", "go/constant", "go/token", "go/types", "text/scanner"},
	"go/internal/srcimporter": {"L4", "fmt", "go/ast", "go/build", "go/parser", "go/token", "go/types", "path/filepath"},
	"go/types": {"L4", "GOPARSER", "container/heap", "go/constant"},
	// One of a kind.
	"archive/tar": {"L4", "OS", "syscall"},
	"archive/zip": {"L4", "OS", "compress/flate"},
	"container/heap": {"sort"},
	"compress/bzip2": {"L4"},
	"compress/flate": {"L4"},
	"compress/gzip": {"L4", "compress/flate"},
	"compress/lzw": {"L4"},
	"compress/zlib": {"L4", "compress/flate"},
	"context": {"errors", "fmt", "reflect", "sync", "time"},
	"database/sql": {"L4", "container/list", "context", "database/sql/driver", "database/sql/internal"},
	"database/sql/driver": {"L4", "context", "time", "database/sql/internal"},
	"debug/dwarf": {"L4"},
	"debug/elf": {"L4", "OS", "debug/dwarf", "compress/zlib"},
	"debug/gosym": {"L4"},
	"debug/macho": {"L4", "OS", "debug/dwarf"},
	"debug/pe": {"L4", "OS", "debug/dwarf"},
	"debug/plan9obj": {"L4", "OS"},
	"encoding": {"L4"},
	"encoding/ascii85": {"L4"},
	"encoding/asn1": {"L4", "math/big"},
	"encoding/csv": {"L4"},
	"encoding/gob": {"L4", "OS", "encoding"},
	"encoding/hex": {"L4"},
	"encoding/json": {"L4", "encoding"},
	"encoding/pem": {"L4"},
	"encoding/xml": {"L4", "encoding"},
	"flag": {"L4", "OS"},
	"go/build": {"L4", "OS", "GOPARSER"},
	"html": {"L4"},
	"image/draw": {"L4", "image/internal/imageutil"},
	"image/gif": {"L4", "compress/lzw", "image/color/palette", "image/draw"},
	"image/internal/imageutil": {"L4"},
	"image/jpeg": {"L4", "image/internal/imageutil"},
	"image/png": {"L4", "compress/zlib"},
	"index/suffixarray": {"L4", "regexp"},
	"internal/singleflight": {"sync"},
	"internal/trace": {"L4", "OS"},
	"math/big": {"L4"},
	"mime": {"L4", "OS", "syscall", "internal/syscall/windows/registry"},
	"mime/quotedprintable": {"L4"},
	"net/internal/socktest": {"L4", "OS", "syscall"},
	"net/url": {"L4"},
	"plugin": {"L0", "OS", "CGO"},
	"runtime/pprof/internal/profile": {"L4", "OS", "compress/gzip", "regexp"},
	"testing/internal/testdeps": {"L4", "runtime/pprof", "regexp"},
	"text/scanner": {"L4", "OS"},
	"text/template/parse": {"L4"},
	"html/template": {
		"L4", "OS", "encoding/json", "html", "text/template",
		"text/template/parse",
	},
	"text/template": {
		"L4", "OS", "net/url", "text/template/parse",
	},
	// Cgo.
	// If you add a dependency on CGO, you must add the package to
	// cgoPackages in cmd/dist/test.go.
	"runtime/cgo": {"L0", "C"},
	"CGO": {"C", "runtime/cgo"},
	// Fake entry to satisfy the pseudo-import "C"
	// that shows up in programs that use cgo.
	"C": {},
	// Race detector/MSan uses cgo.
	"runtime/race": {"C"},
	"runtime/msan": {"C"},
	// Plan 9 alone needs io/ioutil and os.
	"os/user": {"L4", "CGO", "io/ioutil", "os", "syscall"},
	// Basic networking.
	// Because net must be used by any package that wants to
	// do networking portably, it must have a small dependency set: just L0+basic os.
	"net": {
		"L0", "CGO",
		"context", "math/rand", "os", "reflect", "sort", "syscall", "time",
		"internal/nettrace", "internal/poll",
		"internal/syscall/windows", "internal/singleflight", "internal/race",
		"golang_org/x/net/lif", "golang_org/x/net/route",
	},
	// NET enables use of basic network-related packages.
	"NET": {
		"net",
		"mime",
		"net/textproto",
		"net/url",
	},
	// Uses of networking.
	"log/syslog": {"L4", "OS", "net"},
	"net/mail": {"L4", "NET", "OS", "mime"},
	"net/textproto": {"L4", "OS", "net"},
	// Core crypto.
	"crypto/aes": {"L3"},
	"crypto/des": {"L3"},
	"crypto/hmac": {"L3"},
	"crypto/md5": {"L3"},
	"crypto/rc4": {"L3"},
	"crypto/sha1": {"L3"},
	"crypto/sha256": {"L3"},
	"crypto/sha512": {"L3"},
	"CRYPTO": {
		"crypto/aes",
		"crypto/des",
		"crypto/hmac",
		"crypto/md5",
		"crypto/rc4",
		"crypto/sha1",
		"crypto/sha256",
		"crypto/sha512",
		"golang_org/x/crypto/chacha20poly1305",
		"golang_org/x/crypto/curve25519",
		"golang_org/x/crypto/poly1305",
	},
	// Random byte, number generation.
	// This would be part of core crypto except that it imports
	// math/big, which imports fmt.
	"crypto/rand": {"L4", "CRYPTO", "OS", "math/big", "syscall", "internal/syscall/unix"},
	// Mathematical crypto: dependencies on fmt (L4) and math/big.
	// We could avoid some of the fmt, but math/big imports fmt anyway.
	"crypto/dsa": {"L4", "CRYPTO", "math/big"},
	"crypto/ecdsa": {"L4", "CRYPTO", "crypto/elliptic", "math/big", "encoding/asn1"},
	"crypto/elliptic": {"L4", "CRYPTO", "math/big"},
	"crypto/rsa": {"L4", "CRYPTO", "crypto/rand", "math/big"},
	"CRYPTO-MATH": {
		"CRYPTO",
		"crypto/dsa",
		"crypto/ecdsa",
		"crypto/elliptic",
		"crypto/rand",
		"crypto/rsa",
		"encoding/asn1",
		"math/big",
	},
	// SSL/TLS.
	"crypto/tls": {
		"L4", "CRYPTO-MATH", "OS",
		"container/list", "crypto/x509", "encoding/pem", "net", "syscall",
	},
	"crypto/x509": {
		"L4", "CRYPTO-MATH", "OS", "CGO",
		"crypto/x509/pkix", "encoding/pem", "encoding/hex", "net", "os/user", "syscall",
	},
	"crypto/x509/pkix": {"L4", "CRYPTO-MATH"},
	// Simple net+crypto-aware packages.
	"mime/multipart": {"L4", "OS", "mime", "crypto/rand", "net/textproto", "mime/quotedprintable"},
	"net/smtp": {"L4", "CRYPTO", "NET", "crypto/tls"},
	// HTTP, kingpin of dependencies.
	"net/http": {
		"L4", "NET", "OS",
		"compress/gzip",
		"container/list",
		"context",
		"crypto/rand",
		"crypto/tls",
		"golang_org/x/net/http2/hpack",
		"golang_org/x/net/idna",
		"golang_org/x/net/lex/httplex",
		"golang_org/x/net/proxy",
		"golang_org/x/text/unicode/norm",
		"golang_org/x/text/width",
		"internal/nettrace",
		"mime/multipart",
		"net/http/httptrace",
		"net/http/internal",
		"runtime/debug",
	},
	"net/http/internal": {"L4"},
	"net/http/httptrace": {"context", "crypto/tls", "internal/nettrace", "net", "reflect", "time"},
	// HTTP-using packages.
	"expvar": {"L4", "OS", "encoding/json", "net/http"},
	"net/http/cgi": {"L4", "NET", "OS", "crypto/tls", "net/http", "regexp"},
	"net/http/cookiejar": {"L4", "NET", "net/http"},
	"net/http/fcgi": {"L4", "NET", "OS", "context", "net/http", "net/http/cgi"},
	"net/http/httptest": {"L4", "NET", "OS", "crypto/tls", "flag", "net/http", "net/http/internal", "crypto/x509"},
	"net/http/httputil": {"L4", "NET", "OS", "context", "net/http", "net/http/internal"},
	"net/http/pprof": {"L4", "OS", "html/template", "net/http", "runtime/pprof", "runtime/trace"},
	"net/rpc": {"L4", "NET", "encoding/gob", "html/template", "net/http"},
	"net/rpc/jsonrpc": {"L4", "NET", "encoding/json", "net/rpc"},
}
// isMacro reports whether p is a package dependency macro
// (an upper-case key such as "L4" or "OS" in pkgDeps, as opposed to a
// lower-case import path).
// The length guard makes the empty string return false instead of
// panicking with an index-out-of-range on p[0]; all non-empty inputs
// behave exactly as before.
func isMacro(p string) bool {
	return len(p) > 0 && 'A' <= p[0] && p[0] <= 'Z'
}
// allowed returns the set of packages that pkg may import, computed by
// transitively expanding the macro entries (upper-case keys) of pkgDeps.
func allowed(pkg string) map[string]bool {
	seen := map[string]bool{}
	var expand func(string)
	expand = func(p string) {
		if seen[p] {
			return
		}
		seen[p] = true // set even for macros, to avoid loop on cycle
		// Upper-case names are macro-expanded.
		if isMacro(p) {
			for _, dep := range pkgDeps[p] {
				expand(dep)
			}
		}
	}
	for _, dep := range pkgDeps[pkg] {
		expand(dep)
	}
	return seen
}
// listStdPkgs returns the same list of packages as "go list std".
func listStdPkgs(goroot string) ([]string, error) {
// Based on cmd/go's matchPackages function.
var pkgs []string
src := filepath.Join(goroot, "src") + string(filepath.Separator)
walkFn := func(path string, fi os.FileInfo, err error) error {
if err != nil || !fi.IsDir() || path == src {
return nil
}
base := filepath.Base(path)
if strings.HasPrefix(base, ".") || strings.HasPrefix(base, "_") || base == "testdata" {
return filepath.SkipDir
}
name := filepath.ToSlash(path[len(src):])
if name == "builtin" || name == "cmd" || strings.Contains(name, "golang_org") {
return filepath.SkipDir
}
pkgs = append(pkgs, name)
return nil
}
if err := filepath.Walk(src, walkFn); err != nil {
return nil, err
}
return pkgs, nil
}
// This test does not function well in travis under different go versions for some reason.
//
// func TestDependencies(t *testing.T) {
// iOS := runtime.GOOS == "darwin" && (runtime.GOARCH == "arm" || runtime.GOARCH == "arm64")
// if runtime.GOOS == "nacl" || iOS {
// // Tests run in a limited file system and we do not
// // provide access to every source file.
// t.Skipf("skipping on %s/%s, missing full GOROOT", runtime.GOOS, runtime.GOARCH)
// }
// ctxt := Default
// all, err := listStdPkgs(ctxt.GOROOT)
// if err != nil {
// t.Fatal(err)
// }
// sort.Strings(all)
// for _, pkg := range all {
// imports, err := findImports(pkg)
// if err != nil {
// t.Error(err)
// continue
// }
// ok := allowed(pkg)
// var bad []string
// for _, imp := range imports {
// if !ok[imp] {
// bad = append(bad, imp)
// }
// }
// if bad != nil {
// t.Errorf("unexpected dependency: %s imports %v", pkg, bad)
// }
// }
// }
// buildIgnore is the build constraint that excludes a file from the build,
// preceded by the newline that must separate it from any earlier text.
// findImports skips files whose leading comment block contains it.
var buildIgnore = []byte("\n// +build ignore")
// findImports returns the sorted, de-duplicated list of packages imported
// by the non-test Go files of pkg under Default.GOROOT/src, skipping files
// carrying a "// +build ignore" constraint.
func findImports(pkg string) ([]string, error) {
	dir := filepath.Join(Default.GOROOT, "src", pkg)
	entries, err := ioutil.ReadDir(dir)
	if err != nil {
		return nil, err
	}
	seen := map[string]bool{}
	var imports []string
	for _, entry := range entries {
		name := entry.Name()
		if !strings.HasSuffix(name, ".go") || strings.HasSuffix(name, "_test.go") {
			continue
		}
		f, err := os.Open(filepath.Join(dir, name))
		if err != nil {
			return nil, err
		}
		var quoted []string
		data, err := readImports(f, false, &quoted)
		f.Close()
		if err != nil {
			return nil, fmt.Errorf("reading %v: %v", name, err)
		}
		if bytes.Contains(data, buildIgnore) {
			continue
		}
		for _, q := range quoted {
			path, err := strconv.Unquote(q)
			if err != nil {
				continue
			}
			if !seen[path] {
				seen[path] = true
				imports = append(imports, path)
			}
		}
	}
	sort.Strings(imports)
	return imports, nil
}

166
vendor/github.com/magefile/mage/build/doc.go generated vendored Normal file
View File

@ -0,0 +1,166 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package build gathers information about Go packages.
//
// Go Path
//
// The Go path is a list of directory trees containing Go source code.
// It is consulted to resolve imports that cannot be found in the standard
// Go tree. The default path is the value of the GOPATH environment
// variable, interpreted as a path list appropriate to the operating system
// (on Unix, the variable is a colon-separated string;
// on Windows, a semicolon-separated string;
// on Plan 9, a list).
//
// Each directory listed in the Go path must have a prescribed structure:
//
// The src/ directory holds source code. The path below 'src' determines
// the import path or executable name.
//
// The pkg/ directory holds installed package objects.
// As in the Go tree, each target operating system and
// architecture pair has its own subdirectory of pkg
// (pkg/GOOS_GOARCH).
//
// If DIR is a directory listed in the Go path, a package with
// source in DIR/src/foo/bar can be imported as "foo/bar" and
// has its compiled form installed to "DIR/pkg/GOOS_GOARCH/foo/bar.a"
// (or, for gccgo, "DIR/pkg/gccgo/foo/libbar.a").
//
// The bin/ directory holds compiled commands.
// Each command is named for its source directory, but only
// using the final element, not the entire path. That is, the
// command with source in DIR/src/foo/quux is installed into
// DIR/bin/quux, not DIR/bin/foo/quux. The foo/ is stripped
// so that you can add DIR/bin to your PATH to get at the
// installed commands.
//
// Here's an example directory layout:
//
// GOPATH=/home/user/gocode
//
// /home/user/gocode/
// src/
// foo/
// bar/ (go code in package bar)
// x.go
// quux/ (go code in package main)
// y.go
// bin/
// quux (installed command)
// pkg/
// linux_amd64/
// foo/
// bar.a (installed package object)
//
// Build Constraints
//
// A build constraint, also known as a build tag, is a line comment that begins
//
// // +build
//
// that lists the conditions under which a file should be included in the package.
// Constraints may appear in any kind of source file (not just Go), but
// they must appear near the top of the file, preceded
// only by blank lines and other line comments. These rules mean that in Go
// files a build constraint must appear before the package clause.
//
// To distinguish build constraints from package documentation, a series of
// build constraints must be followed by a blank line.
//
// A build constraint is evaluated as the OR of space-separated options;
// each option evaluates as the AND of its comma-separated terms;
// and each term is an alphanumeric word or, preceded by !, its negation.
// That is, the build constraint:
//
// // +build linux,386 darwin,!cgo
//
// corresponds to the boolean formula:
//
// (linux AND 386) OR (darwin AND (NOT cgo))
//
// A file may have multiple build constraints. The overall constraint is the AND
// of the individual constraints. That is, the build constraints:
//
// // +build linux darwin
// // +build 386
//
// corresponds to the boolean formula:
//
// (linux OR darwin) AND 386
//
// During a particular build, the following words are satisfied:
//
// - the target operating system, as spelled by runtime.GOOS
// - the target architecture, as spelled by runtime.GOARCH
// - the compiler being used, either "gc" or "gccgo"
// - "cgo", if ctxt.CgoEnabled is true
// - "go1.1", from Go version 1.1 onward
// - "go1.2", from Go version 1.2 onward
// - "go1.3", from Go version 1.3 onward
// - "go1.4", from Go version 1.4 onward
// - "go1.5", from Go version 1.5 onward
// - "go1.6", from Go version 1.6 onward
// - "go1.7", from Go version 1.7 onward
// - "go1.8", from Go version 1.8 onward
// - "go1.9", from Go version 1.9 onward
// - any additional words listed in ctxt.BuildTags
//
// If a file's name, after stripping the extension and a possible _test suffix,
// matches any of the following patterns:
// *_GOOS
// *_GOARCH
// *_GOOS_GOARCH
// (example: source_windows_amd64.go) where GOOS and GOARCH represent
// any known operating system and architecture values respectively, then
// the file is considered to have an implicit build constraint requiring
// those terms (in addition to any explicit constraints in the file).
//
// To keep a file from being considered for the build:
//
// // +build ignore
//
// (any other unsatisfied word will work as well, but ``ignore'' is conventional.)
//
// To build a file only when using cgo, and only on Linux and OS X:
//
// // +build linux,cgo darwin,cgo
//
// Such a file is usually paired with another file implementing the
// default functionality for other systems, which in this case would
// carry the constraint:
//
// // +build !linux,!darwin !cgo
//
// Naming a file dns_windows.go will cause it to be included only when
// building the package for Windows; similarly, math_386.s will be included
// only when building the package for 32-bit x86.
//
// Using GOOS=android matches build tags and files as for GOOS=linux
// in addition to android tags and files.
//
// Binary-Only Packages
//
// It is possible to distribute packages in binary form without including the
// source code used for compiling the package. To do this, the package must
// be distributed with a source file not excluded by build constraints and
// containing a "//go:binary-only-package" comment.
// Like a build constraint, this comment must appear near the top of the file,
// preceded only by blank lines and other line comments and with a blank line
// following the comment, to separate it from the package documentation.
// Unlike build constraints, this comment is only recognized in non-test
// Go source files.
//
// The minimal source code for a binary-only package is therefore:
//
// //go:binary-only-package
//
// package mypkg
//
// The source code may include additional Go code. That code is never compiled
// but will be processed by tools like godoc and might be useful as end-user
// documentation.
//
package build

247
vendor/github.com/magefile/mage/build/read.go generated vendored Normal file
View File

@ -0,0 +1,247 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package build
import (
"bufio"
"errors"
"io"
"unicode/utf8"
)
// importReader is the state of a partial scan over Go source: every byte
// consumed is appended to buf so the prefix read so far can be returned
// to the caller.
type importReader struct {
	b *bufio.Reader // underlying input
	buf []byte // all bytes read so far
	peek byte // lookahead byte left behind by peekByte; 0 means none
	err error // first error recorded (I/O or syntax)
	eof bool // reached end of input
	nerr int // reads attempted after err was set; guards against looping
}
// isIdent reports whether c may appear in a Go identifier: an ASCII
// letter or digit, an underscore, or any byte >= utf8.RuneSelf (which
// may begin a multi-byte rune).
func isIdent(c byte) bool {
	switch {
	case 'a' <= c && c <= 'z', 'A' <= c && c <= 'Z', '0' <= c && c <= '9', c == '_':
		return true
	default:
		return c >= utf8.RuneSelf
	}
}
var (
	// errSyntax is recorded when the input cannot be parsed as a
	// package clause followed by imports.
	errSyntax = errors.New("syntax error")
	// errNUL is recorded when a NUL byte appears in the input; NUL is
	// never valid in Go source.
	errNUL = errors.New("unexpected NUL in input")
)
// syntaxError records a syntax error, but only if an I/O error has not already been recorded.
func (r *importReader) syntaxError() {
	if r.err != nil {
		return
	}
	r.err = errSyntax
}
// readByte reads the next byte from the input, saves it in buf, and returns it.
// If an error occurs, readByte records the error in r.err and returns 0.
// A NUL byte in the input is treated as an error (errNUL).
func (r *importReader) readByte() byte {
	c, err := r.b.ReadByte()
	if err == nil {
		// Record the byte before checking for NUL so buf stays an
		// exact prefix of the input.
		r.buf = append(r.buf, c)
		if c == 0 {
			err = errNUL
		}
	}
	if err == nil {
		return c
	}
	if err == io.EOF {
		r.eof = true
	} else if r.err == nil {
		r.err = err
	}
	return 0
}
// peekByte returns the next byte from the input reader but does not advance beyond it.
// If skipSpace is set, peekByte skips leading spaces and comments.
// The result is also left in r.peek so a later nextByte can consume it.
func (r *importReader) peekByte(skipSpace bool) byte {
	if r.err != nil {
		// After an error, return 0 forever, but count calls so a caller
		// stuck in a loop eventually panics instead of spinning.
		if r.nerr++; r.nerr > 10000 {
			panic("go/build: import reader looping")
		}
		return 0
	}
	// Use r.peek as first input byte.
	// Don't just return r.peek here: it might have been left by peekByte(false)
	// and this might be peekByte(true).
	c := r.peek
	if c == 0 {
		c = r.readByte()
	}
	for r.err == nil && !r.eof {
		if skipSpace {
			// For the purposes of this reader, semicolons are never necessary to
			// understand the input and are treated as spaces.
			switch c {
			case ' ', '\f', '\t', '\r', '\n', ';':
				c = r.readByte()
				continue
			case '/':
				c = r.readByte()
				if c == '/' {
					// Line comment: consume through the newline.
					for c != '\n' && r.err == nil && !r.eof {
						c = r.readByte()
					}
				} else if c == '*' {
					// Block comment: scan for the closing */ pair,
					// tracking the previous byte in c1.
					var c1 byte
					for (c != '*' || c1 != '/') && r.err == nil {
						if r.eof {
							r.syntaxError() // unterminated /* comment
						}
						c, c1 = c1, r.readByte()
					}
				} else {
					r.syntaxError() // lone '/' is not valid here
				}
				c = r.readByte()
				continue
			}
		}
		break
	}
	r.peek = c
	return r.peek
}
// nextByte is like peekByte but advances beyond the returned byte,
// by clearing the saved lookahead.
func (r *importReader) nextByte(skipSpace bool) byte {
	c := r.peekByte(skipSpace)
	r.peek = 0
	return c
}
// readKeyword reads the given keyword from the input.
// If the keyword is not present, readKeyword records a syntax error.
func (r *importReader) readKeyword(kw string) {
	r.peekByte(true) // skip leading spaces and comments
	for i := range kw {
		if r.nextByte(false) != kw[i] {
			r.syntaxError()
			return
		}
	}
	// The keyword must not run straight into a longer identifier.
	if isIdent(r.peekByte(false)) {
		r.syntaxError()
	}
}
// readIdent reads an identifier from the input.
// If an identifier is not present, readIdent records a syntax error.
func (r *importReader) readIdent() {
	if !isIdent(r.peekByte(true)) {
		r.syntaxError()
		return
	}
	// Consume identifier bytes until a non-identifier byte is seen.
	for isIdent(r.peekByte(false)) {
		r.peek = 0
	}
}
// readString reads a quoted string literal from the input
// (either a raw `...` string or an interpreted "..." string).
// If save is non-nil the literal, quotes included, is appended to *save.
// If a string literal is not present, readString records a syntax error.
func (r *importReader) readString(save *[]string) {
	switch r.nextByte(true) {
	case '`':
		// start marks the opening backquote, already appended to buf.
		start := len(r.buf) - 1
		for r.err == nil {
			if r.nextByte(false) == '`' {
				if save != nil {
					*save = append(*save, string(r.buf[start:]))
				}
				break
			}
			if r.eof {
				r.syntaxError() // unterminated raw string
			}
		}
	case '"':
		start := len(r.buf) - 1
		for r.err == nil {
			c := r.nextByte(false)
			if c == '"' {
				if save != nil {
					*save = append(*save, string(r.buf[start:]))
				}
				break
			}
			// Interpreted strings may not contain a newline or end at EOF.
			if r.eof || c == '\n' {
				r.syntaxError()
			}
			if c == '\\' {
				// Skip the escaped byte so an escaped quote does not
				// terminate the string.
				r.nextByte(false)
			}
		}
	default:
		r.syntaxError() // not a string literal
	}
}
// readImport reads an import clause - optional identifier followed by quoted string -
// from the input.
func (r *importReader) readImport(imports *[]string) {
	switch c := r.peekByte(true); {
	case c == '.':
		r.peek = 0 // consume the dot of a dot-import
	case isIdent(c):
		r.readIdent() // named import
	}
	r.readString(imports)
}
// readComments is like ioutil.ReadAll, except that it only reads the leading
// block of comments in the file.
func readComments(f io.Reader) ([]byte, error) {
	r := &importReader{b: bufio.NewReader(f)}
	r.peekByte(true)
	if r.err != nil || r.eof {
		return r.buf, r.err
	}
	// Didn't reach EOF, so must have found a non-space byte. Remove it.
	return r.buf[:len(r.buf)-1], r.err
}
// readImports is like ioutil.ReadAll, except that it expects a Go file as input
// and stops reading the input once the imports have completed.
// The quoted import paths are appended to *imports.
// If reportSyntaxError is false, a syntax error causes the whole input to be
// consumed and returned without an error, so go/parser can report it instead.
func readImports(f io.Reader, reportSyntaxError bool, imports *[]string) ([]byte, error) {
	r := &importReader{b: bufio.NewReader(f)}
	// package <ident>
	r.readKeyword("package")
	r.readIdent()
	// Zero or more import declarations, each either a single import
	// or a parenthesized group.
	for r.peekByte(true) == 'i' {
		r.readKeyword("import")
		if r.peekByte(true) == '(' {
			r.nextByte(false)
			for r.peekByte(true) != ')' && r.err == nil {
				r.readImport(imports)
			}
			r.nextByte(false)
		} else {
			r.readImport(imports)
		}
	}
	// If we stopped successfully before EOF, we read a byte that told us we were done.
	// Return all but that last byte, which would cause a syntax error if we let it through.
	if r.err == nil && !r.eof {
		return r.buf[:len(r.buf)-1], nil
	}
	// If we stopped for a syntax error, consume the whole file so that
	// we are sure we don't change the errors that go/parser returns.
	if r.err == errSyntax && !reportSyntaxError {
		r.err = nil
		for r.err == nil && !r.eof {
			r.readByte()
		}
	}
	return r.buf, r.err
}

Some files were not shown because too many files have changed in this diff Show More