more dependencies
This commit is contained in:
parent 01eeb5e874
commit 7ed961f871
@@ -7,6 +7,12 @@
"revision": "7d23200fbed9e7f3be4ac76b4f7f6bd19cc4aba0",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/Xe/uuid",
|
||||
"repository": "https://github.com/Xe/uuid",
|
||||
"revision": "62b230097e9c9534ca2074782b25d738c4b68964",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/codegangsta/negroni",
|
||||
"repository": "https://github.com/codegangsta/negroni",
|
||||
@@ -61,6 +67,12 @@
"revision": "0dad96c0b94f8dee039aa40467f767467392a0af",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/scalingdata/gcfg",
|
||||
"repository": "https://github.com/scalingdata/gcfg",
|
||||
"revision": "37aabad69cfd3d20b8390d902a8b10e245c615ff",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/sebest/xff",
|
||||
"repository": "https://github.com/sebest/xff",
|
||||
@@ -79,6 +91,12 @@
"revision": "aa61028b1d32873eaa3e261a3ef0e892a153107b",
|
||||
"branch": "v1"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/yosssi/ace",
|
||||
"repository": "https://github.com/yosssi/ace",
|
||||
"revision": "78e48a2f0ac5fb5a642585f96b03a5f47f7775f5",
|
||||
"branch": "master"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/yosssi/ace-proxy",
|
||||
"repository": "https://github.com/yosssi/ace-proxy",
|
||||
@@ -98,12 +116,6 @@
"revision": "5ec5e003b21ac1f06e175898413ada23a6797fc0",
|
||||
"branch": "master",
|
||||
"path": "/tiff"
|
||||
},
|
||||
{
|
||||
"importpath": "github.com/yosssi/ace",
|
||||
"repository": "https://github.com/yosssi/ace",
|
||||
"revision": "78e48a2f0ac5fb5a642585f96b03a5f47f7775f5",
|
||||
"branch": "master"
|
||||
}
|
||||
]
|
||||
}
@@ -0,0 +1,2 @@
Paul Borman <borman@google.com>
|
||||
Christine Dodrill <xena@yolo-swag.com>
@@ -0,0 +1,27 @@
Copyright (c) 2009 Google Inc. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,5 @@
go-uuid
|
||||
=======
|
||||
|
||||
code.google.com is going away and I use this library a lot. It used to live at
|
||||
https://code.google.com/p/go-uuid/ but now I take care of it.
@@ -0,0 +1,84 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"os"
|
||||
)
|
||||
|
||||
// A Domain represents a Version 2 domain
|
||||
type Domain byte
|
||||
|
||||
// Domain constants for DCE Security (Version 2) UUIDs.
|
||||
const (
|
||||
Person = Domain(0)
|
||||
Group = Domain(1)
|
||||
Org = Domain(2)
|
||||
)
|
||||
|
||||
// NewDCESecurity returns a DCE Security (Version 2) UUID.
|
||||
//
|
||||
// The domain should be one of Person, Group or Org.
|
||||
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
|
||||
// the domain Org or on non-POSIX systems is site defined.
|
||||
//
|
||||
// For a given domain/id pair the same token may be returned for up to
|
||||
// 7 minutes and 10 seconds.
|
||||
func NewDCESecurity(domain Domain, id uint32) UUID {
|
||||
uuid := NewUUID()
|
||||
if uuid != nil {
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
|
||||
uuid[9] = byte(domain)
|
||||
binary.BigEndian.PutUint32(uuid[0:], id)
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
|
||||
// domain with the id returned by os.Getuid.
|
||||
//
|
||||
// NewDCESecurity(Person, uint32(os.Getuid()))
|
||||
func NewDCEPerson() UUID {
|
||||
return NewDCESecurity(Person, uint32(os.Getuid()))
|
||||
}
|
||||
|
||||
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
|
||||
// domain with the id returned by os.Getgid.
|
||||
//
|
||||
// NewDCESecurity(Group, uint32(os.Getgid()))
|
||||
func NewDCEGroup() UUID {
|
||||
return NewDCESecurity(Group, uint32(os.Getgid()))
|
||||
}
|
||||
|
||||
// Domain returns the domain for a Version 2 UUID or false.
|
||||
func (uuid UUID) Domain() (Domain, bool) {
|
||||
if v, _ := uuid.Version(); v != 2 {
|
||||
return 0, false
|
||||
}
|
||||
return Domain(uuid[9]), true
|
||||
}
|
||||
|
||||
// Id returns the id for a Version 2 UUID or false.
|
||||
func (uuid UUID) Id() (uint32, bool) {
|
||||
if v, _ := uuid.Version(); v != 2 {
|
||||
return 0, false
|
||||
}
|
||||
return binary.BigEndian.Uint32(uuid[0:4]), true
|
||||
}
|
||||
|
||||
func (d Domain) String() string {
|
||||
switch d {
|
||||
case Person:
|
||||
return "Person"
|
||||
case Group:
|
||||
return "Group"
|
||||
case Org:
|
||||
return "Org"
|
||||
}
|
||||
return fmt.Sprintf("Domain%d", int(d))
|
||||
}
@@ -0,0 +1,8 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The uuid package generates and inspects UUIDs.
|
||||
//
|
||||
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
|
||||
package uuid
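
As a quick orientation for reviewers, a minimal usage sketch of this vendored package (the import path matches the github.com/Xe/uuid dependency entry above; the function names come from the files added in this commit):

package main

import (
    "fmt"

    "github.com/Xe/uuid"
)

func main() {
    s := uuid.New()    // random (Version 4) UUID as a string
    u := uuid.Parse(s) // Parse returns nil for malformed input
    if u == nil {
        panic("generated UUID failed to parse")
    }
    v, _ := u.Version()
    fmt.Println(s, v, u.Variant()) // prints the UUID, VERSION_4, RFC4122
}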
@@ -0,0 +1,53 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"crypto/md5"
|
||||
"crypto/sha1"
|
||||
"hash"
|
||||
)
|
||||
|
||||
// Well known Name Space IDs and UUIDs
|
||||
var (
|
||||
NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
|
||||
NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
|
||||
NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
|
||||
NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
|
||||
NIL = Parse("00000000-0000-0000-0000-000000000000")
|
||||
)
|
||||
|
||||
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
|
||||
// first 16 bytes of the hash are used to form the UUID. The version of the
|
||||
// UUID will be the lower 4 bits of version. NewHash is used to implement
|
||||
// NewMD5 and NewSHA1.
|
||||
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
|
||||
h.Reset()
|
||||
h.Write(space)
|
||||
h.Write([]byte(data))
|
||||
s := h.Sum(nil)
|
||||
uuid := make([]byte, 16)
|
||||
copy(uuid, s)
|
||||
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
|
||||
return uuid
|
||||
}
|
||||
|
||||
// NewMD5 returns a new MD5 (Version 3) UUID based on the
|
||||
// supplied name space and data.
|
||||
//
|
||||
// NewHash(md5.New(), space, data, 3)
|
||||
func NewMD5(space UUID, data []byte) UUID {
|
||||
return NewHash(md5.New(), space, data, 3)
|
||||
}
|
||||
|
||||
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
|
||||
// supplied name space and data.
|
||||
//
|
||||
// NewHash(sha1.New(), space, data, 5)
|
||||
func NewSHA1(space UUID, data []byte) UUID {
|
||||
return NewHash(sha1.New(), space, data, 5)
|
||||
}
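
A short sketch of the name-based constructors defined above; the expected SHA1 value matches the test vector in uuid_test.go later in this commit:

package main

import (
    "fmt"

    "github.com/Xe/uuid"
)

func main() {
    // Deterministic: the same namespace and name always yield the same UUID.
    u := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("python.org"))
    fmt.Println(u) // 886313e1-3b8a-5372-9b90-0c9aee199e5d (see TestSHA1 below)
}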
@@ -0,0 +1,101 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import "net"
|
||||
|
||||
var (
|
||||
interfaces []net.Interface // cached list of interfaces
|
||||
ifname string // name of interface being used
|
||||
nodeID []byte // hardware for version 1 UUIDs
|
||||
)
|
||||
|
||||
// NodeInterface returns the name of the interface from which the NodeID was
|
||||
// derived. The interface "user" is returned if the NodeID was set by
|
||||
// SetNodeID.
|
||||
func NodeInterface() string {
|
||||
return ifname
|
||||
}
|
||||
|
||||
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
|
||||
// If name is "" then the first usable interface found will be used or a random
|
||||
// Node ID will be generated. If a named interface cannot be found then false
|
||||
// is returned.
|
||||
//
|
||||
// SetNodeInterface never fails when name is "".
|
||||
func SetNodeInterface(name string) bool {
|
||||
if interfaces == nil {
|
||||
var err error
|
||||
interfaces, err = net.Interfaces()
|
||||
if err != nil && name != "" {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
for _, ifs := range interfaces {
|
||||
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
|
||||
if setNodeID(ifs.HardwareAddr) {
|
||||
ifname = ifs.Name
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// We found no interfaces with a valid hardware address. If name
|
||||
// does not specify a specific interface generate a random Node ID
|
||||
// (section 4.1.6)
|
||||
if name == "" {
|
||||
if nodeID == nil {
|
||||
nodeID = make([]byte, 6)
|
||||
}
|
||||
randomBits(nodeID)
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// NodeID returns a slice of a copy of the current Node ID, setting the Node ID
|
||||
// if not already set.
|
||||
func NodeID() []byte {
|
||||
if nodeID == nil {
|
||||
SetNodeInterface("")
|
||||
}
|
||||
nid := make([]byte, 6)
|
||||
copy(nid, nodeID)
|
||||
return nid
|
||||
}
|
||||
|
||||
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
|
||||
// of id are used. If id is less than 6 bytes then false is returned and the
|
||||
// Node ID is not set.
|
||||
func SetNodeID(id []byte) bool {
|
||||
if setNodeID(id) {
|
||||
ifname = "user"
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func setNodeID(id []byte) bool {
|
||||
if len(id) < 6 {
|
||||
return false
|
||||
}
|
||||
if nodeID == nil {
|
||||
nodeID = make([]byte, 6)
|
||||
}
|
||||
copy(nodeID, id)
|
||||
return true
|
||||
}
|
||||
|
||||
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
|
||||
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) NodeID() []byte {
|
||||
if len(uuid) != 16 {
|
||||
return nil
|
||||
}
|
||||
node := make([]byte, 6)
|
||||
copy(node, uuid[10:])
|
||||
return node
|
||||
}
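
An illustrative (hypothetical) sketch of pinning the node ID with the setters above, e.g. for reproducible Version 1 UUIDs; the 6-byte value here is made up:

package main

import (
    "fmt"
    "log"

    "github.com/Xe/uuid"
)

func main() {
    // SetNodeID copies the first 6 bytes of its argument and reports false
    // if fewer than 6 bytes are supplied.
    if !uuid.SetNodeID([]byte{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01}) {
        log.Fatal("node ID must be at least 6 bytes")
    }
    fmt.Println(uuid.NodeInterface()) // "user" once the ID is set explicitly
    fmt.Println(uuid.NewUUID())       // Version 1 UUID ending in deadbeef0001
}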
@@ -0,0 +1,132 @@
// Copyright 2014 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
|
||||
// 1582.
|
||||
type Time int64
|
||||
|
||||
const (
|
||||
lillian = 2299160 // Julian day of 15 Oct 1582
|
||||
unix = 2440587 // Julian day of 1 Jan 1970
|
||||
epoch = unix - lillian // Days between epochs
|
||||
g1582 = epoch * 86400 // seconds between epochs
|
||||
g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
|
||||
)
|
||||
|
||||
var (
|
||||
mu sync.Mutex
|
||||
lasttime uint64 // last time we returned
|
||||
clock_seq uint16 // clock sequence for this run
|
||||
|
||||
timeNow = time.Now // for testing
|
||||
)
|
||||
|
||||
// UnixTime converts t to the number of seconds and nanoseconds since the Unix
// epoch of 1 Jan 1970.
|
||||
func (t Time) UnixTime() (sec, nsec int64) {
|
||||
sec = int64(t - g1582ns100)
|
||||
nsec = (sec % 10000000) * 100
|
||||
sec /= 10000000
|
||||
return sec, nsec
|
||||
}
|
||||
|
||||
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
|
||||
// adjusts the clock sequence as needed. An error is returned if the current
|
||||
// time cannot be determined.
|
||||
func GetTime() (Time, error) {
|
||||
defer mu.Unlock()
|
||||
mu.Lock()
|
||||
return getTime()
|
||||
}
|
||||
|
||||
func getTime() (Time, error) {
|
||||
t := timeNow()
|
||||
|
||||
// If we don't have a clock sequence already, set one.
|
||||
if clock_seq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
now := uint64(t.UnixNano()/100) + g1582ns100
|
||||
|
||||
// If time has gone backwards with this clock sequence then we
|
||||
// increment the clock sequence
|
||||
if now <= lasttime {
|
||||
clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
|
||||
}
|
||||
lasttime = now
|
||||
return Time(now), nil
|
||||
}
|
||||
|
||||
// ClockSequence returns the current clock sequence, generating one if not
|
||||
// already set. The clock sequence is only used for Version 1 UUIDs.
|
||||
//
|
||||
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (Section 4.2.1.1 of RFC 4122.)
|
||||
func ClockSequence() int {
|
||||
defer mu.Unlock()
|
||||
mu.Lock()
|
||||
return clockSequence()
|
||||
}
|
||||
|
||||
func clockSequence() int {
|
||||
if clock_seq == 0 {
|
||||
setClockSequence(-1)
|
||||
}
|
||||
return int(clock_seq & 0x3fff)
|
||||
}
|
||||
|
||||
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
|
||||
// -1 causes a new sequence to be generated.
|
||||
func SetClockSequence(seq int) {
|
||||
defer mu.Unlock()
|
||||
mu.Lock()
|
||||
setClockSequence(seq)
|
||||
}
|
||||
|
||||
func setClockSequence(seq int) {
|
||||
if seq == -1 {
|
||||
var b [2]byte
|
||||
randomBits(b[:]) // clock sequence
|
||||
seq = int(b[0])<<8 | int(b[1])
|
||||
}
|
||||
old_seq := clock_seq
|
||||
clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
|
||||
if old_seq != clock_seq {
|
||||
lasttime = 0
|
||||
}
|
||||
}
|
||||
|
||||
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
|
||||
// uuid. It returns false if uuid is not valid. The time is only well defined
|
||||
// for version 1 and 2 UUIDs.
|
||||
func (uuid UUID) Time() (Time, bool) {
|
||||
if len(uuid) != 16 {
|
||||
return 0, false
|
||||
}
|
||||
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
|
||||
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
|
||||
return Time(time), true
|
||||
}
|
||||
|
||||
// ClockSequence returns the clock sequence encoded in uuid. It returns false
|
||||
// if uuid is not valid. The clock sequence is only well defined for version 1
|
||||
// and 2 UUIDs.
|
||||
func (uuid UUID) ClockSequence() (int, bool) {
|
||||
if len(uuid) != 16 {
|
||||
return 0, false
|
||||
}
|
||||
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
|
||||
}
@@ -0,0 +1,43 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"io"
|
||||
)
|
||||
|
||||
// randomBits completely fills slice b with random data.
|
||||
func randomBits(b []byte) {
|
||||
if _, err := io.ReadFull(rander, b); err != nil {
|
||||
panic(err.Error()) // rand should never fail
|
||||
}
|
||||
}
|
||||
|
||||
// xvalues returns the value of a byte as a hexadecimal digit or 255.
|
||||
var xvalues = []byte{
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
|
||||
}
|
||||
|
||||
// xtob converts the first two hex bytes of x into a byte.
|
||||
func xtob(x string) (byte, bool) {
|
||||
b1 := xvalues[x[0]]
|
||||
b2 := xvalues[x[1]]
|
||||
return (b1 << 4) | b2, b1 != 255 && b2 != 255
|
||||
}
@@ -0,0 +1,163 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
|
||||
// 4122.
|
||||
type UUID []byte
|
||||
|
||||
// A Version represents a UUID's version.
|
||||
type Version byte
|
||||
|
||||
// A Variant represents a UUID's variant.
|
||||
type Variant byte
|
||||
|
||||
// Constants returned by Variant.
|
||||
const (
|
||||
Invalid = Variant(iota) // Invalid UUID
|
||||
RFC4122 // The variant specified in RFC4122
|
||||
Reserved // Reserved, NCS backward compatibility.
|
||||
Microsoft // Reserved, Microsoft Corporation backward compatibility.
|
||||
Future // Reserved for future definition.
|
||||
)
|
||||
|
||||
var rander = rand.Reader // random function
|
||||
|
||||
// New returns a new random (version 4) UUID as a string. It is a convenience
|
||||
// function for NewRandom().String().
|
||||
func New() string {
|
||||
return NewRandom().String()
|
||||
}
|
||||
|
||||
// Parse decodes s into a UUID or returns nil. Both the UUID form of
|
||||
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
|
||||
func Parse(s string) UUID {
|
||||
if len(s) == 36+9 {
|
||||
if strings.ToLower(s[:9]) != "urn:uuid:" {
|
||||
return nil
|
||||
}
|
||||
s = s[9:]
|
||||
} else if len(s) != 36 {
|
||||
return nil
|
||||
}
|
||||
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
|
||||
return nil
|
||||
}
|
||||
uuid := make([]byte, 16)
|
||||
for i, x := range []int{
|
||||
0, 2, 4, 6,
|
||||
9, 11,
|
||||
14, 16,
|
||||
19, 21,
|
||||
24, 26, 28, 30, 32, 34} {
|
||||
if v, ok := xtob(s[x:]); !ok {
|
||||
return nil
|
||||
} else {
|
||||
uuid[i] = v
|
||||
}
|
||||
}
|
||||
return uuid
|
||||
}
|
||||
|
||||
// Equal returns true if uuid1 and uuid2 are equal.
|
||||
func Equal(uuid1, uuid2 UUID) bool {
|
||||
return bytes.Equal(uuid1, uuid2)
|
||||
}
|
||||
|
||||
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx,
// or "" if uuid is invalid.
|
||||
func (uuid UUID) String() string {
|
||||
if uuid == nil || len(uuid) != 16 {
|
||||
return ""
|
||||
}
|
||||
b := []byte(uuid)
|
||||
return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
|
||||
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
|
||||
}
|
||||
|
||||
// URN returns the RFC 2141 URN form of uuid,
|
||||
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
|
||||
func (uuid UUID) URN() string {
|
||||
if uuid == nil || len(uuid) != 16 {
|
||||
return ""
|
||||
}
|
||||
b := []byte(uuid)
|
||||
return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
|
||||
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
|
||||
}
|
||||
|
||||
// Variant returns the variant encoded in uuid. It returns Invalid if
|
||||
// uuid is invalid.
|
||||
func (uuid UUID) Variant() Variant {
|
||||
if len(uuid) != 16 {
|
||||
return Invalid
|
||||
}
|
||||
switch {
|
||||
case (uuid[8] & 0xc0) == 0x80:
|
||||
return RFC4122
|
||||
case (uuid[8] & 0xe0) == 0xc0:
|
||||
return Microsoft
|
||||
case (uuid[8] & 0xe0) == 0xe0:
|
||||
return Future
|
||||
default:
|
||||
return Reserved
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
// Version returns the version of uuid. It returns false if uuid is not
|
||||
// valid.
|
||||
func (uuid UUID) Version() (Version, bool) {
|
||||
if len(uuid) != 16 {
|
||||
return 0, false
|
||||
}
|
||||
return Version(uuid[6] >> 4), true
|
||||
}
|
||||
|
||||
func (v Version) String() string {
|
||||
if v > 15 {
|
||||
return fmt.Sprintf("BAD_VERSION_%d", v)
|
||||
}
|
||||
return fmt.Sprintf("VERSION_%d", v)
|
||||
}
|
||||
|
||||
func (v Variant) String() string {
|
||||
switch v {
|
||||
case RFC4122:
|
||||
return "RFC4122"
|
||||
case Reserved:
|
||||
return "Reserved"
|
||||
case Microsoft:
|
||||
return "Microsoft"
|
||||
case Future:
|
||||
return "Future"
|
||||
case Invalid:
|
||||
return "Invalid"
|
||||
}
|
||||
return fmt.Sprintf("BadVariant%d", int(v))
|
||||
}
|
||||
|
||||
// SetRand sets the random number generator to r, which implements io.Reader.
|
||||
// If r.Read returns an error when the package requests random data then
|
||||
// a panic will be issued.
|
||||
//
|
||||
// Calling SetRand with nil sets the random number generator to the default
|
||||
// generator.
|
||||
func SetRand(r io.Reader) {
|
||||
if r == nil {
|
||||
rander = rand.Reader
|
||||
return
|
||||
}
|
||||
rander = r
|
||||
}
@@ -0,0 +1,390 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
type test struct {
|
||||
in string
|
||||
version Version
|
||||
variant Variant
|
||||
isuuid bool
|
||||
}
|
||||
|
||||
var tests = []test{
|
||||
{"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
|
||||
{"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
|
||||
{"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
|
||||
{"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
|
||||
{"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
|
||||
{"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
|
||||
{"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
|
||||
{"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
|
||||
{"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
|
||||
{"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
|
||||
{"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
|
||||
{"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
|
||||
{"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
|
||||
{"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},
|
||||
|
||||
{"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
|
||||
{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
|
||||
{"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
|
||||
{"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
|
||||
{"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
|
||||
{"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},
|
||||
|
||||
{"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
|
||||
{"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
|
||||
{"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
|
||||
{"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
|
||||
{"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
|
||||
{"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
|
||||
}
|
||||
|
||||
var constants = []struct {
|
||||
c interface{}
|
||||
name string
|
||||
}{
|
||||
{Person, "Person"},
|
||||
{Group, "Group"},
|
||||
{Org, "Org"},
|
||||
{Invalid, "Invalid"},
|
||||
{RFC4122, "RFC4122"},
|
||||
{Reserved, "Reserved"},
|
||||
{Microsoft, "Microsoft"},
|
||||
{Future, "Future"},
|
||||
{Domain(17), "Domain17"},
|
||||
{Variant(42), "BadVariant42"},
|
||||
}
|
||||
|
||||
func testTest(t *testing.T, in string, tt test) {
|
||||
uuid := Parse(in)
|
||||
if ok := (uuid != nil); ok != tt.isuuid {
|
||||
t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid)
|
||||
}
|
||||
if uuid == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if v := uuid.Variant(); v != tt.variant {
|
||||
t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant)
|
||||
}
|
||||
if v, _ := uuid.Version(); v != tt.version {
|
||||
t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUUID(t *testing.T) {
|
||||
for _, tt := range tests {
|
||||
testTest(t, tt.in, tt)
|
||||
testTest(t, strings.ToUpper(tt.in), tt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestConstants(t *testing.T) {
|
||||
for x, tt := range constants {
|
||||
v, ok := tt.c.(fmt.Stringer)
|
||||
if !ok {
|
||||
t.Errorf("%x: %v: not a stringer", x, v)
|
||||
} else if s := v.String(); s != tt.name {
|
||||
v, _ := tt.c.(int)
|
||||
t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRandomUUID(t *testing.T) {
|
||||
m := make(map[string]bool)
|
||||
for x := 1; x < 32; x++ {
|
||||
uuid := NewRandom()
|
||||
s := uuid.String()
|
||||
if m[s] {
|
||||
t.Errorf("NewRandom returned duplicated UUID %s\n", s)
|
||||
}
|
||||
m[s] = true
|
||||
if v, _ := uuid.Version(); v != 4 {
|
||||
t.Errorf("Random UUID of version %s\n", v)
|
||||
}
|
||||
if uuid.Variant() != RFC4122 {
|
||||
t.Errorf("Random UUID is variant %d\n", uuid.Variant())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestNew(t *testing.T) {
|
||||
m := make(map[string]bool)
|
||||
for x := 1; x < 32; x++ {
|
||||
s := New()
|
||||
if m[s] {
|
||||
t.Errorf("New returned duplicated UUID %s\n", s)
|
||||
}
|
||||
m[s] = true
|
||||
uuid := Parse(s)
|
||||
if uuid == nil {
|
||||
t.Errorf("New returned %q which does not decode\n", s)
|
||||
continue
|
||||
}
|
||||
if v, _ := uuid.Version(); v != 4 {
|
||||
t.Errorf("Random UUID of version %s\n", v)
|
||||
}
|
||||
if uuid.Variant() != RFC4122 {
|
||||
t.Errorf("Random UUID is variant %d\n", uuid.Variant())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func clockSeq(t *testing.T, uuid UUID) int {
|
||||
seq, ok := uuid.ClockSequence()
|
||||
if !ok {
|
||||
t.Fatalf("%s: invalid clock sequence\n", uuid)
|
||||
}
|
||||
return seq
|
||||
}
|
||||
|
||||
func TestClockSeq(t *testing.T) {
|
||||
// Fake time.Now for this test to return a monotonically advancing time; restore it at end.
|
||||
defer func(orig func() time.Time) { timeNow = orig }(timeNow)
|
||||
monTime := time.Now()
|
||||
timeNow = func() time.Time {
|
||||
monTime = monTime.Add(1 * time.Second)
|
||||
return monTime
|
||||
}
|
||||
|
||||
SetClockSequence(-1)
|
||||
uuid1 := NewUUID()
|
||||
uuid2 := NewUUID()
|
||||
|
||||
if clockSeq(t, uuid1) != clockSeq(t, uuid2) {
|
||||
t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2))
|
||||
}
|
||||
|
||||
SetClockSequence(-1)
|
||||
uuid2 = NewUUID()
|
||||
|
||||
// Just on the very off chance we generated the same sequence
|
||||
// two times we try again.
|
||||
if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
|
||||
SetClockSequence(-1)
|
||||
uuid2 = NewUUID()
|
||||
}
|
||||
if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
|
||||
t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1))
|
||||
}
|
||||
|
||||
SetClockSequence(0x1234)
|
||||
uuid1 = NewUUID()
|
||||
if seq := clockSeq(t, uuid1); seq != 0x1234 {
|
||||
t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCoding(t *testing.T) {
|
||||
text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
|
||||
urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
|
||||
data := UUID{
|
||||
0x7d, 0x44, 0x48, 0x40,
|
||||
0x9d, 0xc0,
|
||||
0x11, 0xd1,
|
||||
0xb2, 0x45,
|
||||
0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
|
||||
}
|
||||
if v := data.String(); v != text {
|
||||
t.Errorf("%x: encoded to %s, expected %s\n", data, v, text)
|
||||
}
|
||||
if v := data.URN(); v != urn {
|
||||
t.Errorf("%x: urn is %s, expected %s\n", data, v, urn)
|
||||
}
|
||||
|
||||
uuid := Parse(text)
|
||||
if !Equal(uuid, data) {
|
||||
t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data)
|
||||
}
|
||||
}
|
||||
|
||||
func TestVersion1(t *testing.T) {
|
||||
uuid1 := NewUUID()
|
||||
uuid2 := NewUUID()
|
||||
|
||||
if Equal(uuid1, uuid2) {
|
||||
t.Errorf("%s:duplicate uuid\n", uuid1)
|
||||
}
|
||||
if v, _ := uuid1.Version(); v != 1 {
|
||||
t.Errorf("%s: version %s expected 1\n", uuid1, v)
|
||||
}
|
||||
if v, _ := uuid2.Version(); v != 1 {
|
||||
t.Errorf("%s: version %s expected 1\n", uuid2, v)
|
||||
}
|
||||
n1 := uuid1.NodeID()
|
||||
n2 := uuid2.NodeID()
|
||||
if !bytes.Equal(n1, n2) {
|
||||
t.Errorf("Different nodes %x != %x\n", n1, n2)
|
||||
}
|
||||
t1, ok := uuid1.Time()
|
||||
if !ok {
|
||||
t.Errorf("%s: invalid time\n", uuid1)
|
||||
}
|
||||
t2, ok := uuid2.Time()
|
||||
if !ok {
|
||||
t.Errorf("%s: invalid time\n", uuid2)
|
||||
}
|
||||
q1, ok := uuid1.ClockSequence()
|
||||
if !ok {
|
||||
t.Errorf("%s: invalid clock sequence\n", uuid1)
|
||||
}
|
||||
q2, ok := uuid2.ClockSequence()
|
||||
if !ok {
|
||||
t.Errorf("%s: invalid clock sequence", uuid2)
|
||||
}
|
||||
|
||||
switch {
|
||||
case t1 == t2 && q1 == q2:
|
||||
t.Errorf("time stopped\n")
|
||||
case t1 > t2 && q1 == q2:
|
||||
t.Errorf("time reversed\n")
|
||||
case t1 < t2 && q1 != q2:
|
||||
t.Errorf("clock sequence chaned unexpectedly\n")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeAndTime(t *testing.T) {
|
||||
// Time is February 5, 1998 12:30:23.136364800 AM GMT
|
||||
|
||||
uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
|
||||
node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}
|
||||
|
||||
ts, ok := uuid.Time()
|
||||
if ok {
|
||||
c := time.Unix(ts.UnixTime())
|
||||
want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
|
||||
if !c.Equal(want) {
|
||||
t.Errorf("Got time %v, want %v", c, want)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("%s: bad time\n", uuid)
|
||||
}
|
||||
if !bytes.Equal(node, uuid.NodeID()) {
|
||||
t.Errorf("Expected node %v got %v\n", node, uuid.NodeID())
|
||||
}
|
||||
}
|
||||
|
||||
func TestMD5(t *testing.T) {
|
||||
uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String()
|
||||
want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
|
||||
if uuid != want {
|
||||
t.Errorf("MD5: got %q expected %q\n", uuid, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSHA1(t *testing.T) {
|
||||
uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String()
|
||||
want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
|
||||
if uuid != want {
|
||||
t.Errorf("SHA1: got %q expected %q\n", uuid, want)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNodeID(t *testing.T) {
|
||||
nid := []byte{1, 2, 3, 4, 5, 6}
|
||||
SetNodeInterface("")
|
||||
s := NodeInterface()
|
||||
if s == "" || s == "user" {
|
||||
t.Errorf("NodeInterface %q after SetInteface\n", s)
|
||||
}
|
||||
node1 := NodeID()
|
||||
if node1 == nil {
|
||||
t.Errorf("NodeID nil after SetNodeInterface\n", s)
|
||||
}
|
||||
SetNodeID(nid)
|
||||
s = NodeInterface()
|
||||
if s != "user" {
|
||||
t.Errorf("Expected NodeInterface %q got %q\n", "user", s)
|
||||
}
|
||||
node2 := NodeID()
|
||||
if node2 == nil {
|
||||
t.Errorf("NodeID nil after SetNodeID\n", s)
|
||||
}
|
||||
if bytes.Equal(node1, node2) {
|
||||
t.Errorf("NodeID not changed after SetNodeID\n", s)
|
||||
} else if !bytes.Equal(nid, node2) {
|
||||
t.Errorf("NodeID is %x, expected %x\n", node2, nid)
|
||||
}
|
||||
}
|
||||
|
||||
func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) {
|
||||
if uuid == nil {
|
||||
t.Errorf("%s failed\n", name)
|
||||
return
|
||||
}
|
||||
if v, _ := uuid.Version(); v != 2 {
|
||||
t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v)
|
||||
return
|
||||
}
|
||||
if v, ok := uuid.Domain(); !ok || v != domain {
|
||||
if !ok {
|
||||
t.Errorf("%s: %d: Domain failed\n", name, uuid)
|
||||
} else {
|
||||
t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v)
|
||||
}
|
||||
}
|
||||
if v, ok := uuid.Id(); !ok || v != id {
|
||||
if !ok {
|
||||
t.Errorf("%s: %d: Id failed\n", name, uuid)
|
||||
} else {
|
||||
t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDCE(t *testing.T) {
|
||||
testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678)
|
||||
testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid()))
|
||||
testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid()))
|
||||
}
|
||||
|
||||
type badRand struct{}
|
||||
|
||||
func (r badRand) Read(buf []byte) (int, error) {
|
||||
for i := range buf {
|
||||
buf[i] = byte(i)
|
||||
}
|
||||
return len(buf), nil
|
||||
}
|
||||
|
||||
func TestBadRand(t *testing.T) {
|
||||
SetRand(badRand{})
|
||||
uuid1 := New()
|
||||
uuid2 := New()
|
||||
if uuid1 != uuid2 {
|
||||
t.Errorf("execpted duplicates, got %q and %q\n", uuid1, uuid2)
|
||||
}
|
||||
SetRand(nil)
|
||||
uuid1 = New()
|
||||
uuid2 = New()
|
||||
if uuid1 == uuid2 {
|
||||
t.Errorf("unexecpted duplicates, got %q\n", uuid1)
|
||||
}
|
||||
}
@@ -0,0 +1,41 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
)
|
||||
|
||||
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
|
||||
// sequence, and the current time. If the NodeID has not been set by SetNodeID
|
||||
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
|
||||
// be set NewUUID returns nil. If clock sequence has not been set by
|
||||
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current time then NewUUID returns nil.
|
||||
func NewUUID() UUID {
|
||||
if nodeID == nil {
|
||||
SetNodeInterface("")
|
||||
}
|
||||
|
||||
now, err := GetTime()
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
uuid := make([]byte, 16)
|
||||
|
||||
time_low := uint32(now & 0xffffffff)
|
||||
time_mid := uint16((now >> 32) & 0xffff)
|
||||
time_hi := uint16((now >> 48) & 0x0fff)
|
||||
time_hi |= 0x1000 // Version 1
|
||||
|
||||
binary.BigEndian.PutUint32(uuid[0:], time_low)
|
||||
binary.BigEndian.PutUint16(uuid[4:], time_mid)
|
||||
binary.BigEndian.PutUint16(uuid[6:], time_hi)
|
||||
binary.BigEndian.PutUint16(uuid[8:], clock_seq)
|
||||
copy(uuid[10:], nodeID)
|
||||
|
||||
return uuid
|
||||
}
@@ -0,0 +1,25 @@
// Copyright 2011 Google Inc. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package uuid
|
||||
|
||||
// NewRandom returns a Random (Version 4) UUID or panics.
|
||||
//
|
||||
// The strength of the UUIDs is based on the strength of the crypto/rand
|
||||
// package.
|
||||
//
|
||||
// A note about uniqueness derived from the UUID Wikipedia entry:
|
||||
//
|
||||
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
|
||||
// hit by a meteorite is estimated to be one chance in 17 billion, that
|
||||
// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
|
||||
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
|
||||
// year and having one duplicate.
|
||||
func NewRandom() UUID {
|
||||
uuid := make([]byte, 16)
|
||||
randomBits([]byte(uuid))
|
||||
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
|
||||
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
|
||||
return uuid
|
||||
}
@@ -0,0 +1,57 @@
Copyright (c) 2012 Péter Surányi. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
----------------------------------------------------------------------
|
||||
Portions of gcfg's source code have been derived from Go, and are
|
||||
covered by the following license:
|
||||
----------------------------------------------------------------------
|
||||
|
||||
Copyright (c) 2009 The Go Authors. All rights reserved.
|
||||
|
||||
Redistribution and use in source and binary forms, with or without
|
||||
modification, are permitted provided that the following conditions are
|
||||
met:
|
||||
|
||||
* Redistributions of source code must retain the above copyright
|
||||
notice, this list of conditions and the following disclaimer.
|
||||
* Redistributions in binary form must reproduce the above
|
||||
copyright notice, this list of conditions and the following disclaimer
|
||||
in the documentation and/or other materials provided with the
|
||||
distribution.
|
||||
* Neither the name of Google Inc. nor the names of its
|
||||
contributors may be used to endorse or promote products derived from
|
||||
this software without specific prior written permission.
|
||||
|
||||
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
|
||||
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
|
||||
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
|
||||
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
|
||||
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
|
||||
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
|
||||
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,7 @@
Gcfg reads INI-style configuration files into Go structs;
|
||||
supports user-defined types and subsections.
|
||||
|
||||
Project page: https://code.google.com/p/gcfg
|
||||
Package docs: http://godoc.org/code.google.com/p/gcfg
|
||||
|
||||
My other projects: https://speter.net
@@ -0,0 +1,118 @@
// Package gcfg reads "INI-style" text-based configuration files with
|
||||
// "name=value" pairs grouped into sections (gcfg files).
|
||||
//
|
||||
// This package is still a work in progress; see the sections below for planned
|
||||
// changes.
|
||||
//
|
||||
// Syntax
|
||||
//
|
||||
// The syntax is based on that used by git config:
|
||||
// http://git-scm.com/docs/git-config#_syntax .
|
||||
// There are some (planned) differences compared to the git config format:
|
||||
// - improve data portability:
|
||||
// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
|
||||
// - include and "path" type is not supported
|
||||
// (path type may be implementable as a user-defined type)
|
||||
// - internationalization
|
||||
// - section and variable names can contain unicode letters, unicode digits
|
||||
// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
|
||||
// (U+002D), starting with a unicode letter
|
||||
// - disallow potentially ambiguous or misleading definitions:
|
||||
// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
|
||||
// - `[sec ""]` is not allowed
|
||||
// - use `[sec]` for section name "sec" and empty subsection name
|
||||
// - (planned) within a single file, definitions must be contiguous for each:
|
||||
// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
|
||||
// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
|
||||
// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
|
||||
//
|
||||
// Data structure
|
||||
//
|
||||
// The functions in this package read values into a user-defined struct.
|
||||
// Each section corresponds to a struct field in the config struct, and each
|
||||
// variable in a section corresponds to a data field in the section struct.
|
||||
// The mapping of each section or variable name to fields is done either based
|
||||
// on the "gcfg" struct tag or by matching the name of the section or variable,
|
||||
// ignoring case. In the latter case, hyphens '-' in section and variable names
|
||||
// correspond to underscores '_' in field names.
|
||||
// Fields must be exported; to use a section or variable name starting with a
|
||||
// letter that is neither upper- nor lower-case, prefix the field name with 'X'.
|
||||
// (See https://code.google.com/p/go/issues/detail?id=5763#c4 .)
|
||||
//
|
||||
// For sections with subsections, the corresponding field in config must be a
|
||||
// map, rather than a struct, with string keys and pointer-to-struct values.
|
||||
// Values for subsection variables are stored in the map with the subsection
|
||||
// name used as the map key.
|
||||
// (Note that unlike section and variable names, subsection names are case
|
||||
// sensitive.)
|
||||
// When using a map, and there is a section with the same section name but
|
||||
// without a subsection name, its values are stored with the empty string used
|
||||
// as the key.
|
||||
//
|
||||
// The functions in this package panic if config is not a pointer to a struct,
|
||||
// or when a field is not of a suitable type (either a struct or a map with
|
||||
// string keys and pointer-to-struct values).
|
||||
//
|
||||
// Parsing of values
|
||||
//
|
||||
// The section structs in the config struct may contain single-valued or
|
||||
// multi-valued variables. Variables of unnamed slice type (that is, a type
|
||||
// starting with `[]`) are treated as multi-value; all others (including named
|
||||
// slice types) are treated as single-valued variables.
|
||||
//
|
||||
// Single-valued variables are handled based on the type as follows.
|
||||
// Unnamed pointer types (that is, types starting with `*`) are dereferenced,
|
||||
// and if necessary, a new instance is allocated.
|
||||
//
|
||||
// For types implementing the encoding.TextUnmarshaler interface, the
|
||||
// UnmarshalText method is used to set the value. Implementing this method is
|
||||
// the recommended way for parsing user-defined types.
|
||||
//
|
||||
// For fields of string kind, the value string is assigned to the field, after
|
||||
// unquoting and unescaping as needed.
|
||||
// For fields of bool kind, the field is set to true if the value is "true",
|
||||
// "yes", "on" or "1", and set to false if the value is "false", "no", "off" or
|
||||
// "0", ignoring case. In addition, single-valued bool fields can be specified
|
||||
// with a "blank" value (variable name without equals sign and value); in such
|
||||
// case the value is set to true.
|
||||
//
|
||||
// Predefined integer types [u]int(|8|16|32|64) and big.Int are parsed as
|
||||
// decimal or hexadecimal (if having '0x' prefix). (This is to prevent
|
||||
// unintuitively handling zero-padded numbers as octal.) Other types having
|
||||
// [u]int* as the underlying type, such as os.FileMode and uintptr allow
|
||||
// decimal, hexadecimal, or octal values.
|
||||
// Parsing mode for integer types can be overridden using the struct tag option
|
||||
// ",int=mode" where mode is a combination of the 'd', 'h', and 'o' characters
|
||||
// (each standing for decimal, hexadecimal, and octal, respectively.)
|
||||
//
|
||||
// All other types are parsed using fmt.Sscanf with the "%v" verb.
|
||||
//
|
||||
// For multi-valued variables, each individual value is parsed as above and
|
||||
// appended to the slice. If the first value is specified as a "blank" value
|
||||
// (variable name without equals sign and value), a new slice is allocated;
|
||||
// that is any values previously set in the slice will be ignored.
|
||||
//
|
||||
// The types subpackage provides helpers for parsing "enum-like" and integer
|
||||
// types.
|
||||
//
|
||||
// TODO
|
||||
//
|
||||
// The following is a list of changes under consideration:
|
||||
// - documentation
|
||||
// - self-contained syntax documentation
|
||||
// - more practical examples
|
||||
// - move TODOs to issue tracker (eventually)
|
||||
// - syntax
|
||||
// - reconsider valid escape sequences
|
||||
// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
|
||||
// - reading / parsing gcfg files
|
||||
// - define internal representation structure
|
||||
// - support multiple inputs (readers, strings, files)
|
||||
// - support declaring encoding (?)
|
||||
// - support varying fields sets for subsections (?)
|
||||
// - writing gcfg files
|
||||
// - error handling
|
||||
// - make error context accessible programmatically?
|
||||
// - limit input size?
|
||||
//
|
||||
package gcfg
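
The example file added next covers the basic cases; one thing it does not show is the encoding.TextUnmarshaler route recommended above for user-defined types. A hedged sketch (the Color type and its hex format are illustrative assumptions, not part of this commit):

package config

import "fmt"

// Color can be used as a gcfg field type because it implements
// encoding.TextUnmarshaler; gcfg calls UnmarshalText with the raw value,
// so a line like "background = ff8800" parses without special-case code.
type Color struct{ R, G, B uint8 }

func (c *Color) UnmarshalText(text []byte) error {
    n, err := fmt.Sscanf(string(text), "%2x%2x%2x", &c.R, &c.G, &c.B)
    if err == nil && n != 3 {
        err = fmt.Errorf("color %q: want 6 hex digits", text)
    }
    return err
}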
@@ -0,0 +1,132 @@
package gcfg_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
import "github.com/scalingdata/gcfg"
|
||||
|
||||
func ExampleReadStringInto() {
|
||||
cfgStr := `; Comment line
|
||||
[section]
|
||||
name=value # comment`
|
||||
cfg := struct {
|
||||
Section struct {
|
||||
Name string
|
||||
}
|
||||
}{}
|
||||
err := gcfg.ReadStringInto(&cfg, cfgStr)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse gcfg data: %s", err)
|
||||
}
|
||||
fmt.Println(cfg.Section.Name)
|
||||
// Output: value
|
||||
}
|
||||
|
||||
func ExampleReadStringInto_bool() {
|
||||
cfgStr := `; Comment line
|
||||
[section]
|
||||
switch=on`
|
||||
cfg := struct {
|
||||
Section struct {
|
||||
Switch bool
|
||||
}
|
||||
}{}
|
||||
err := gcfg.ReadStringInto(&cfg, cfgStr)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse gcfg data: %s", err)
|
||||
}
|
||||
fmt.Println(cfg.Section.Switch)
|
||||
// Output: true
|
||||
}
|
||||
|
||||
func ExampleReadStringInto_hyphens() {
|
||||
cfgStr := `; Comment line
|
||||
[section-name]
|
||||
variable-name=value # comment`
|
||||
cfg := struct {
|
||||
Section_Name struct {
|
||||
Variable_Name string
|
||||
}
|
||||
}{}
|
||||
err := gcfg.ReadStringInto(&cfg, cfgStr)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse gcfg data: %s", err)
|
||||
}
|
||||
fmt.Println(cfg.Section_Name.Variable_Name)
|
||||
// Output: value
|
||||
}
|
||||
|
||||
func ExampleReadStringInto_tags() {
|
||||
cfgStr := `; Comment line
|
||||
[section]
|
||||
var-name=value # comment`
|
||||
cfg := struct {
|
||||
Section struct {
|
||||
FieldName string `gcfg:"var-name"`
|
||||
}
|
||||
}{}
|
||||
err := gcfg.ReadStringInto(&cfg, cfgStr)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse gcfg data: %s", err)
|
||||
}
|
||||
fmt.Println(cfg.Section.FieldName)
|
||||
// Output: value
|
||||
}
|
||||
|
||||
func ExampleReadStringInto_subsections() {
|
||||
cfgStr := `; Comment line
|
||||
[profile "A"]
|
||||
color = white
|
||||
|
||||
[profile "B"]
|
||||
color = black
|
||||
`
|
||||
cfg := struct {
|
||||
Profile map[string]*struct {
|
||||
Color string
|
||||
}
|
||||
}{}
|
||||
err := gcfg.ReadStringInto(&cfg, cfgStr)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse gcfg data: %s", err)
|
||||
}
|
||||
fmt.Printf("%s %s\n", cfg.Profile["A"].Color, cfg.Profile["B"].Color)
|
||||
// Output: white black
|
||||
}
|
||||
|
||||
func ExampleReadStringInto_multivalue() {
|
||||
cfgStr := `; Comment line
|
||||
[section]
|
||||
multi=value1
|
||||
multi=value2`
|
||||
cfg := struct {
|
||||
Section struct {
|
||||
Multi []string
|
||||
}
|
||||
}{}
|
||||
err := gcfg.ReadStringInto(&cfg, cfgStr)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse gcfg data: %s", err)
|
||||
}
|
||||
fmt.Println(cfg.Section.Multi)
|
||||
// Output: [value1 value2]
|
||||
}
|
||||
|
||||
func ExampleReadStringInto_unicode() {
|
||||
cfgStr := `; Comment line
|
||||
[甲]
|
||||
乙=丙 # comment`
|
||||
cfg := struct {
|
||||
X甲 struct {
|
||||
X乙 string
|
||||
}
|
||||
}{}
|
||||
err := gcfg.ReadStringInto(&cfg, cfgStr)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to parse gcfg data: %s", err)
|
||||
}
|
||||
fmt.Println(cfg.X甲.X乙)
|
||||
// Output: 丙
|
||||
}
@@ -0,0 +1,7 @@
// +build !go1.2
|
||||
|
||||
package gcfg
|
||||
|
||||
type textUnmarshaler interface {
|
||||
UnmarshalText(text []byte) error
|
||||
}
@@ -0,0 +1,9 @@
// +build go1.2
|
||||
|
||||
package gcfg
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
)
|
||||
|
||||
type textUnmarshaler encoding.TextUnmarshaler
@@ -0,0 +1,63 @@
package gcfg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
type Config1 struct {
|
||||
Section struct {
|
||||
Int int
|
||||
BigInt big.Int
|
||||
}
|
||||
}
|
||||
|
||||
var testsIssue1 = []struct {
|
||||
cfg string
|
||||
typename string
|
||||
}{
|
||||
{"[section]\nint=X", "int"},
|
||||
{"[section]\nint=", "int"},
|
||||
{"[section]\nint=1A", "int"},
|
||||
{"[section]\nbigint=X", "big.Int"},
|
||||
{"[section]\nbigint=", "big.Int"},
|
||||
{"[section]\nbigint=1A", "big.Int"},
|
||||
}
|
||||
|
||||
// Value parse error should:
|
||||
// - include plain type name
|
||||
// - not include reflect internals
|
||||
func TestIssue1(t *testing.T) {
|
||||
for i, tt := range testsIssue1 {
|
||||
var c Config1
|
||||
err := ReadStringInto(&c, tt.cfg)
|
||||
switch {
|
||||
case err == nil:
|
||||
t.Errorf("%d fail: got ok; wanted error", i)
|
||||
case !strings.Contains(err.Error(), tt.typename):
|
||||
t.Errorf("%d fail: error message doesn't contain type name %q: %v",
|
||||
i, tt.typename, err)
|
||||
case strings.Contains(err.Error(), "reflect"):
|
||||
t.Errorf("%d fail: error message includes reflect internals: %v",
|
||||
i, err)
|
||||
default:
|
||||
t.Logf("%d pass: %v", i, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type confIssue2 struct{ Main struct{ Foo string } }
|
||||
|
||||
var testsIssue2 = []readtest{
|
||||
{"[main]\n;\nfoo = bar\n", &confIssue2{struct{ Foo string }{"bar"}}, true},
|
||||
{"[main]\r\n;\r\nfoo = bar\r\n", &confIssue2{struct{ Foo string }{"bar"}}, true},
|
||||
}
|
||||
|
||||
func TestIssue2(t *testing.T) {
|
||||
for i, tt := range testsIssue2 {
|
||||
id := fmt.Sprintf("issue2:%d", i)
|
||||
testRead(t, id, tt)
|
||||
}
|
||||
}
@@ -0,0 +1,181 @@
package gcfg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
import (
|
||||
"github.com/scalingdata/gcfg/scanner"
|
||||
"github.com/scalingdata/gcfg/token"
|
||||
)
|
||||
|
||||
var unescape = map[rune]rune{'\\': '\\', '"': '"', 'n': '\n', 't': '\t'}
|
||||
|
||||
// no error: invalid literals should be caught by scanner
|
||||
func unquote(s string) string {
|
||||
u, q, esc := make([]rune, 0, len(s)), false, false
|
||||
for _, c := range s {
|
||||
if esc {
|
||||
uc, ok := unescape[c]
|
||||
switch {
|
||||
case ok:
|
||||
u = append(u, uc)
|
||||
fallthrough
|
||||
case !q && c == '\n':
|
||||
esc = false
|
||||
continue
|
||||
}
|
||||
panic("invalid escape sequence")
|
||||
}
|
||||
switch c {
|
||||
case '"':
|
||||
q = !q
|
||||
case '\\':
|
||||
esc = true
|
||||
default:
|
||||
u = append(u, c)
|
||||
}
|
||||
}
|
||||
if q {
|
||||
panic("missing end quote")
|
||||
}
|
||||
if esc {
|
||||
panic("invalid escape sequence")
|
||||
}
|
||||
return string(u)
|
||||
}
|
||||
|
||||
func readInto(config interface{}, fset *token.FileSet, file *token.File, src []byte) error {
|
||||
var s scanner.Scanner
|
||||
var errs scanner.ErrorList
|
||||
s.Init(file, src, func(p token.Position, m string) { errs.Add(p, m) }, 0)
|
||||
sect, sectsub := "", ""
|
||||
pos, tok, lit := s.Scan()
|
||||
errfn := func(msg string) error {
|
||||
return fmt.Errorf("%s: %s", fset.Position(pos), msg)
|
||||
}
|
||||
for {
|
||||
if errs.Len() > 0 {
|
||||
return errs.Err()
|
||||
}
|
||||
switch tok {
|
||||
case token.EOF:
|
||||
return nil
|
||||
case token.EOL, token.COMMENT:
|
||||
pos, tok, lit = s.Scan()
|
||||
case token.LBRACK:
|
||||
pos, tok, lit = s.Scan()
|
||||
if errs.Len() > 0 {
|
||||
return errs.Err()
|
||||
}
|
||||
if tok != token.IDENT {
|
||||
return errfn("expected section name")
|
||||
}
|
||||
sect, sectsub = lit, ""
|
||||
pos, tok, lit = s.Scan()
|
||||
if errs.Len() > 0 {
|
||||
return errs.Err()
|
||||
}
|
||||
if tok == token.STRING {
|
||||
sectsub = unquote(lit)
|
||||
if sectsub == "" {
|
||||
return errfn("empty subsection name")
|
||||
}
|
||||
pos, tok, lit = s.Scan()
|
||||
if errs.Len() > 0 {
|
||||
return errs.Err()
|
||||
}
|
||||
}
|
||||
if tok != token.RBRACK {
|
||||
if sectsub == "" {
|
||||
return errfn("expected subsection name or right bracket")
|
||||
}
|
||||
return errfn("expected right bracket")
|
||||
}
|
||||
pos, tok, lit = s.Scan()
|
||||
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
|
||||
return errfn("expected EOL, EOF, or comment")
|
||||
}
|
||||
case token.IDENT:
|
||||
if sect == "" {
|
||||
return errfn("expected section header")
|
||||
}
|
||||
n := lit
|
||||
pos, tok, lit = s.Scan()
|
||||
if errs.Len() > 0 {
|
||||
return errs.Err()
|
||||
}
|
||||
blank, v := tok == token.EOF || tok == token.EOL || tok == token.COMMENT, ""
|
||||
if !blank {
|
||||
if tok != token.ASSIGN {
|
||||
return errfn("expected '='")
|
||||
}
|
||||
pos, tok, lit = s.Scan()
|
||||
if errs.Len() > 0 {
|
||||
return errs.Err()
|
||||
}
|
||||
if tok != token.STRING {
|
||||
return errfn("expected value")
|
||||
}
|
||||
v = unquote(lit)
|
||||
pos, tok, lit = s.Scan()
|
||||
if errs.Len() > 0 {
|
||||
return errs.Err()
|
||||
}
|
||||
if tok != token.EOL && tok != token.EOF && tok != token.COMMENT {
|
||||
return errfn("expected EOL, EOF, or comment")
|
||||
}
|
||||
}
|
||||
err := set(config, sect, sectsub, n, blank, v)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
if sect == "" {
|
||||
return errfn("expected section header")
|
||||
}
|
||||
return errfn("expected section header or variable declaration")
|
||||
}
|
||||
}
|
||||
panic("never reached")
|
||||
}
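
// For orientation, the text format handled by readInto looks roughly like this
// (an illustrative sketch assembled from the tests in this change; not part of
// the original source):
//
//	; comments start with ';' or '#'
//	[section]
//	name = value
//	flag                       ; no '=' gives a "blank" value
//	[section "subsection"]
//	name = "quoted value"      ; values may be quoted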
|
||||
|
||||
// ReadInto reads gcfg formatted data from reader and sets the values into the
|
||||
// corresponding fields in config.
|
||||
func ReadInto(config interface{}, reader io.Reader) error {
|
||||
src, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fset := token.NewFileSet()
|
||||
file := fset.AddFile("", fset.Base(), len(src))
|
||||
return readInto(config, fset, file, src)
|
||||
}
|
||||
|
||||
// ReadStringInto reads gcfg formatted data from str and sets the values into
|
||||
// the corresponding fields in config.
|
||||
func ReadStringInto(config interface{}, str string) error {
|
||||
r := strings.NewReader(str)
|
||||
return ReadInto(config, r)
|
||||
}
|
||||
|
||||
// ReadFileInto reads gcfg formatted data from the file filename and sets the
|
||||
// values into the corresponding fields in config.
|
||||
func ReadFileInto(config interface{}, filename string) error {
|
||||
f, err := os.Open(filename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
src, err := ioutil.ReadAll(f)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
fset := token.NewFileSet()
|
||||
file := fset.AddFile(filename, fset.Base(), len(src))
|
||||
return readInto(config, fset, file, src)
|
||||
}
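
// A minimal usage sketch (illustrative only; the struct and field names below
// are hypothetical, not part of the original source):
//
//	type appConfig struct {
//		Server struct {
//			Addr string
//			Port int
//		}
//	}
//
//	var cfg appConfig
//	err := ReadStringInto(&cfg, "[server]\naddr = localhost\nport = 8080")
//	// on success, cfg.Server.Addr == "localhost" and cfg.Server.Port == 8080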
|
|
@ -0,0 +1,333 @@
|
|||
package gcfg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
const (
|
||||
// 64 spaces
|
||||
sp64 = "                                                                "
|
||||
// 512 spaces
|
||||
sp512 = sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64 + sp64
|
||||
// 4096 spaces
|
||||
sp4096 = sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512 + sp512
|
||||
)
|
||||
|
||||
type cBasic struct {
|
||||
Section cBasicS1
|
||||
Hyphen_In_Section cBasicS2
|
||||
unexported cBasicS1
|
||||
Exported cBasicS3
|
||||
TagName cBasicS1 `gcfg:"tag-name"`
|
||||
}
|
||||
type cBasicS1 struct {
|
||||
Name string
|
||||
Int int
|
||||
PName *string
|
||||
}
|
||||
type cBasicS2 struct {
|
||||
Hyphen_In_Name string
|
||||
}
|
||||
type cBasicS3 struct {
|
||||
unexported string
|
||||
}
|
||||
|
||||
type nonMulti []string
|
||||
|
||||
type unmarshalable string
|
||||
|
||||
func (u *unmarshalable) UnmarshalText(text []byte) error {
|
||||
s := string(text)
|
||||
if s == "error" {
|
||||
return fmt.Errorf("%s", s)
|
||||
}
|
||||
*u = unmarshalable(s)
|
||||
return nil
|
||||
}
|
||||
|
||||
var _ textUnmarshaler = new(unmarshalable)
|
||||
|
||||
type cUni struct {
|
||||
X甲 cUniS1
|
||||
XSection cUniS2
|
||||
}
|
||||
type cUniS1 struct {
|
||||
X乙 string
|
||||
}
|
||||
type cUniS2 struct {
|
||||
XName string
|
||||
}
|
||||
|
||||
type cMulti struct {
|
||||
M1 cMultiS1
|
||||
M2 cMultiS2
|
||||
M3 cMultiS3
|
||||
}
|
||||
type cMultiS1 struct{ Multi []string }
|
||||
type cMultiS2 struct{ NonMulti nonMulti }
|
||||
type cMultiS3 struct{ MultiInt []int }
|
||||
|
||||
type cSubs struct{ Sub map[string]*cSubsS1 }
|
||||
type cSubsS1 struct{ Name string }
|
||||
|
||||
type cBool struct{ Section cBoolS1 }
|
||||
type cBoolS1 struct{ Bool bool }
|
||||
|
||||
type cTxUnm struct{ Section cTxUnmS1 }
|
||||
type cTxUnmS1 struct{ Name unmarshalable }
|
||||
|
||||
type cNum struct {
|
||||
N1 cNumS1
|
||||
N2 cNumS2
|
||||
N3 cNumS3
|
||||
}
|
||||
type cNumS1 struct {
|
||||
Int int
|
||||
IntDHO int `gcfg:",int=dho"`
|
||||
Big *big.Int
|
||||
}
|
||||
type cNumS2 struct {
|
||||
MultiInt []int
|
||||
MultiBig []*big.Int
|
||||
}
|
||||
type cNumS3 struct{ FileMode os.FileMode }
|
||||
type readtest struct {
|
||||
gcfg string
|
||||
exp interface{}
|
||||
ok bool
|
||||
}
|
||||
|
||||
func newString(s string) *string {
|
||||
return &s
|
||||
}
|
||||
|
||||
var readtests = []struct {
|
||||
group string
|
||||
tests []readtest
|
||||
}{{"scanning", []readtest{
|
||||
{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
// hyphen in name
|
||||
{"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
|
||||
// quoted string value
|
||||
{"[section]\nname=\"\"", &cBasic{Section: cBasicS1{Name: ""}}, true},
|
||||
{"[section]\nname=\" \"", &cBasic{Section: cBasicS1{Name: " "}}, true},
|
||||
{"[section]\nname=\"value\"", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\nname=\" value \"", &cBasic{Section: cBasicS1{Name: " value "}}, true},
|
||||
{"\n[section]\nname=\"va ; lue\"", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
|
||||
{"[section]\nname=\"val\" \"ue\"", &cBasic{Section: cBasicS1{Name: "val ue"}}, true},
|
||||
{"[section]\nname=\"value", &cBasic{}, false},
|
||||
// escape sequences
|
||||
{"[section]\nname=\"va\\\\lue\"", &cBasic{Section: cBasicS1{Name: "va\\lue"}}, true},
|
||||
{"[section]\nname=\"va\\\"lue\"", &cBasic{Section: cBasicS1{Name: "va\"lue"}}, true},
|
||||
{"[section]\nname=\"va\\nlue\"", &cBasic{Section: cBasicS1{Name: "va\nlue"}}, true},
|
||||
{"[section]\nname=\"va\\tlue\"", &cBasic{Section: cBasicS1{Name: "va\tlue"}}, true},
|
||||
{"\n[section]\nname=\\", &cBasic{}, false},
|
||||
{"\n[section]\nname=\\a", &cBasic{}, false},
|
||||
{"\n[section]\nname=\"val\\a\"", &cBasic{}, false},
|
||||
{"\n[section]\nname=val\\", &cBasic{}, false},
|
||||
{"\n[sub \"A\\\n\"]\nname=value", &cSubs{}, false},
|
||||
{"\n[sub \"A\\\t\"]\nname=value", &cSubs{}, false},
|
||||
// broken line
|
||||
{"[section]\nname=value \\\n value", &cBasic{Section: cBasicS1{Name: "value value"}}, true},
|
||||
{"[section]\nname=\"value \\\n value\"", &cBasic{}, false},
|
||||
}}, {"scanning:whitespace", []readtest{
|
||||
{" \n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{" [section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"\t[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[ section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section ]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\n name=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\nname =value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\nname= value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\nname=value ", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\r\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{";cmnt\r\n[section]\r\nname=value\r\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
// long lines
|
||||
{sp4096 + "[section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[" + sp4096 + "section]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section" + sp4096 + "]\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]" + sp4096 + "\nname=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\n" + sp4096 + "name=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\nname" + sp4096 + "=value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\nname=" + sp4096 + "value\n", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\nname=value\n" + sp4096, &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
}}, {"scanning:comments", []readtest{
|
||||
{"; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"# cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{" ; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"\t; cmnt\n[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"\n[section]; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"\n[section] ; cmnt\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"\n[section]\nname=value; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"\n[section]\nname=value ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"\n[section]\nname=\"value\" ; cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"\n[section]\nname=value ; \"cmnt", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"\n[section]\nname=\"va ; lue\" ; cmnt", &cBasic{Section: cBasicS1{Name: "va ; lue"}}, true},
|
||||
{"\n[section]\nname=; cmnt", &cBasic{Section: cBasicS1{Name: ""}}, true},
|
||||
}}, {"scanning:subsections", []readtest{
|
||||
{"\n[sub \"A\"]\nname=value", &cSubs{map[string]*cSubsS1{"A": &cSubsS1{"value"}}}, true},
|
||||
{"\n[sub \"b\"]\nname=value", &cSubs{map[string]*cSubsS1{"b": &cSubsS1{"value"}}}, true},
|
||||
{"\n[sub \"A\\\\\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\\": &cSubsS1{"value"}}}, true},
|
||||
{"\n[sub \"A\\\"\"]\nname=value", &cSubs{map[string]*cSubsS1{"A\"": &cSubsS1{"value"}}}, true},
|
||||
}}, {"syntax", []readtest{
|
||||
// invalid line
|
||||
{"\n[section]\n=", &cBasic{}, false},
|
||||
// no section
|
||||
{"name=value", &cBasic{}, false},
|
||||
// empty section
|
||||
{"\n[]\nname=value", &cBasic{}, false},
|
||||
// empty subsection
|
||||
{"\n[sub \"\"]\nname=value", &cSubs{}, false},
|
||||
}}, {"setting", []readtest{
|
||||
{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
// pointer
|
||||
{"[section]", &cBasic{Section: cBasicS1{PName: nil}}, true},
|
||||
{"[section]\npname=value", &cBasic{Section: cBasicS1{PName: newString("value")}}, true},
|
||||
// section name not matched
|
||||
{"\n[nonexistent]\nname=value", &cBasic{}, false},
|
||||
// subsection name not matched
|
||||
{"\n[section \"nonexistent\"]\nname=value", &cBasic{}, false},
|
||||
// variable name not matched
|
||||
{"\n[section]\nnonexistent=value", &cBasic{}, false},
|
||||
// hyphen in name
|
||||
{"[hyphen-in-section]\nhyphen-in-name=value", &cBasic{Hyphen_In_Section: cBasicS2{Hyphen_In_Name: "value"}}, true},
|
||||
// ignore unexported fields
|
||||
{"[unexported]\nname=value", &cBasic{}, false},
|
||||
{"[exported]\nunexported=value", &cBasic{}, false},
|
||||
// 'X' prefix for non-upper/lower-case letters
|
||||
{"[甲]\n乙=丙", &cUni{X甲: cUniS1{X乙: "丙"}}, true},
|
||||
//{"[section]\nxname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
|
||||
//{"[xsection]\nname=value", &cBasic{XSection: cBasicS4{XName: "value"}}, false},
|
||||
// name specified as struct tag
|
||||
{"[tag-name]\nname=value", &cBasic{TagName: cBasicS1{Name: "value"}}, true},
|
||||
}}, {"multivalue", []readtest{
|
||||
// unnamed slice type: treat as multi-value
|
||||
{"\n[m1]", &cMulti{M1: cMultiS1{}}, true},
|
||||
{"\n[m1]\nmulti=value", &cMulti{M1: cMultiS1{[]string{"value"}}}, true},
|
||||
{"\n[m1]\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
|
||||
// "blank" empties multi-valued slice -- here same result as above
|
||||
{"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true},
|
||||
// named slice type: do not treat as multi-value
|
||||
{"\n[m2]", &cMulti{}, true},
|
||||
{"\n[m2]\nmulti=value", &cMulti{}, false},
|
||||
{"\n[m2]\nmulti=value1\nmulti=value2", &cMulti{}, false},
|
||||
}}, {"type:string", []readtest{
|
||||
{"[section]\nname=value", &cBasic{Section: cBasicS1{Name: "value"}}, true},
|
||||
{"[section]\nname=", &cBasic{Section: cBasicS1{Name: ""}}, true},
|
||||
}}, {"type:bool", []readtest{
|
||||
// explicit values
|
||||
{"[section]\nbool=true", &cBool{cBoolS1{true}}, true},
|
||||
{"[section]\nbool=yes", &cBool{cBoolS1{true}}, true},
|
||||
{"[section]\nbool=on", &cBool{cBoolS1{true}}, true},
|
||||
{"[section]\nbool=1", &cBool{cBoolS1{true}}, true},
|
||||
{"[section]\nbool=tRuE", &cBool{cBoolS1{true}}, true},
|
||||
{"[section]\nbool=false", &cBool{cBoolS1{false}}, true},
|
||||
{"[section]\nbool=no", &cBool{cBoolS1{false}}, true},
|
||||
{"[section]\nbool=off", &cBool{cBoolS1{false}}, true},
|
||||
{"[section]\nbool=0", &cBool{cBoolS1{false}}, true},
|
||||
{"[section]\nbool=NO", &cBool{cBoolS1{false}}, true},
|
||||
// "blank" value handled as true
|
||||
{"[section]\nbool", &cBool{cBoolS1{true}}, true},
|
||||
// bool parse errors
|
||||
{"[section]\nbool=maybe", &cBool{}, false},
|
||||
{"[section]\nbool=t", &cBool{}, false},
|
||||
{"[section]\nbool=truer", &cBool{}, false},
|
||||
{"[section]\nbool=2", &cBool{}, false},
|
||||
{"[section]\nbool=-1", &cBool{}, false},
|
||||
}}, {"type:numeric", []readtest{
|
||||
{"[section]\nint=0", &cBasic{Section: cBasicS1{Int: 0}}, true},
|
||||
{"[section]\nint=1", &cBasic{Section: cBasicS1{Int: 1}}, true},
|
||||
{"[section]\nint=-1", &cBasic{Section: cBasicS1{Int: -1}}, true},
|
||||
{"[section]\nint=0.2", &cBasic{}, false},
|
||||
{"[section]\nint=1e3", &cBasic{}, false},
|
||||
// primitive [u]int(|8|16|32|64) and big.Int are parsed as dec or hex (not octal)
|
||||
{"[n1]\nint=010", &cNum{N1: cNumS1{Int: 10}}, true},
|
||||
{"[n1]\nint=0x10", &cNum{N1: cNumS1{Int: 0x10}}, true},
|
||||
{"[n1]\nbig=1", &cNum{N1: cNumS1{Big: big.NewInt(1)}}, true},
|
||||
{"[n1]\nbig=0x10", &cNum{N1: cNumS1{Big: big.NewInt(0x10)}}, true},
|
||||
{"[n1]\nbig=010", &cNum{N1: cNumS1{Big: big.NewInt(10)}}, true},
|
||||
{"[n2]\nmultiint=010", &cNum{N2: cNumS2{MultiInt: []int{10}}}, true},
|
||||
{"[n2]\nmultibig=010", &cNum{N2: cNumS2{MultiBig: []*big.Int{big.NewInt(10)}}}, true},
|
||||
// set parse mode for int types via struct tag
|
||||
{"[n1]\nintdho=010", &cNum{N1: cNumS1{IntDHO: 010}}, true},
|
||||
// octal allowed for named type
|
||||
{"[n3]\nfilemode=0777", &cNum{N3: cNumS3{FileMode: 0777}}, true},
|
||||
}}, {"type:textUnmarshaler", []readtest{
|
||||
{"[section]\nname=value", &cTxUnm{Section: cTxUnmS1{Name: "value"}}, true},
|
||||
{"[section]\nname=error", &cTxUnm{}, false},
|
||||
}},
|
||||
}
|
||||
|
||||
func TestReadStringInto(t *testing.T) {
|
||||
for _, tg := range readtests {
|
||||
for i, tt := range tg.tests {
|
||||
id := fmt.Sprintf("%s:%d", tg.group, i)
|
||||
testRead(t, id, tt)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadStringIntoMultiBlankPreset(t *testing.T) {
|
||||
tt := readtest{"\n[m1]\nmulti\nmulti=value1\nmulti=value2", &cMulti{M1: cMultiS1{[]string{"value1", "value2"}}}, true}
|
||||
cfg := &cMulti{M1: cMultiS1{[]string{"preset1", "preset2"}}}
|
||||
testReadInto(t, "multi:blank", tt, cfg)
|
||||
}
|
||||
|
||||
func testRead(t *testing.T, id string, tt readtest) {
|
||||
// get the type of the expected result
|
||||
restyp := reflect.TypeOf(tt.exp).Elem()
|
||||
// create a new instance to hold the actual result
|
||||
res := reflect.New(restyp).Interface()
|
||||
testReadInto(t, id, tt, res)
|
||||
}
|
||||
|
||||
func testReadInto(t *testing.T, id string, tt readtest, res interface{}) {
|
||||
err := ReadStringInto(res, tt.gcfg)
|
||||
if tt.ok {
|
||||
if err != nil {
|
||||
t.Errorf("%s fail: got error %v, wanted ok", id, err)
|
||||
return
|
||||
} else if !reflect.DeepEqual(res, tt.exp) {
|
||||
t.Errorf("%s fail: got value %#v, wanted value %#v", id, res, tt.exp)
|
||||
return
|
||||
}
|
||||
if !testing.Short() {
|
||||
t.Logf("%s pass: got value %#v", id, res)
|
||||
}
|
||||
} else { // !tt.ok
|
||||
if err == nil {
|
||||
t.Errorf("%s fail: got value %#v, wanted error", id, res)
|
||||
return
|
||||
}
|
||||
if !testing.Short() {
|
||||
t.Logf("%s pass: got error %v", id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFileInto(t *testing.T) {
|
||||
res := &struct{ Section struct{ Name string } }{}
|
||||
err := ReadFileInto(res, "testdata/gcfg_test.gcfg")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if "value" != res.Section.Name {
|
||||
t.Errorf("got %q, wanted %q", res.Section.Name, "value")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReadFileIntoUnicode(t *testing.T) {
|
||||
res := &struct{ X甲 struct{ X乙 string } }{}
|
||||
err := ReadFileInto(res, "testdata/gcfg_unicode_test.gcfg")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
if "丙" != res.X甲.X乙 {
|
||||
t.Errorf("got %q, wanted %q", res.X甲.X乙, "丙")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,121 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"sort"
|
||||
)
|
||||
|
||||
import (
|
||||
"github.com/scalingdata/gcfg/token"
|
||||
)
|
||||
|
||||
// In an ErrorList, an error is represented by an *Error.
|
||||
// The position Pos, if valid, points to the beginning of
|
||||
// the offending token, and the error condition is described
|
||||
// by Msg.
|
||||
//
|
||||
type Error struct {
|
||||
Pos token.Position
|
||||
Msg string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (e Error) Error() string {
|
||||
if e.Pos.Filename != "" || e.Pos.IsValid() {
|
||||
// don't print "<unknown position>"
|
||||
// TODO(gri) reconsider the semantics of Position.IsValid
|
||||
return e.Pos.String() + ": " + e.Msg
|
||||
}
|
||||
return e.Msg
|
||||
}
|
||||
|
||||
// ErrorList is a list of *Errors.
|
||||
// The zero value for an ErrorList is an empty ErrorList ready to use.
|
||||
//
|
||||
type ErrorList []*Error
|
||||
|
||||
// Add adds an Error with given position and error message to an ErrorList.
|
||||
func (p *ErrorList) Add(pos token.Position, msg string) {
|
||||
*p = append(*p, &Error{pos, msg})
|
||||
}
|
||||
|
||||
// Reset resets an ErrorList to no errors.
|
||||
func (p *ErrorList) Reset() { *p = (*p)[0:0] }
|
||||
|
||||
// ErrorList implements the sort Interface.
|
||||
func (p ErrorList) Len() int { return len(p) }
|
||||
func (p ErrorList) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
|
||||
|
||||
func (p ErrorList) Less(i, j int) bool {
|
||||
e := &p[i].Pos
|
||||
f := &p[j].Pos
|
||||
if e.Filename < f.Filename {
|
||||
return true
|
||||
}
|
||||
if e.Filename == f.Filename {
|
||||
return e.Offset < f.Offset
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Sort sorts an ErrorList. *Error entries are sorted by position,
|
||||
// other errors are sorted by error message, and before any *Error
|
||||
// entry.
|
||||
//
|
||||
func (p ErrorList) Sort() {
|
||||
sort.Sort(p)
|
||||
}
|
||||
|
||||
// RemoveMultiples sorts an ErrorList and removes all but the first error per line.
|
||||
func (p *ErrorList) RemoveMultiples() {
|
||||
sort.Sort(p)
|
||||
var last token.Position // initial last.Line is != any legal error line
|
||||
i := 0
|
||||
for _, e := range *p {
|
||||
if e.Pos.Filename != last.Filename || e.Pos.Line != last.Line {
|
||||
last = e.Pos
|
||||
(*p)[i] = e
|
||||
i++
|
||||
}
|
||||
}
|
||||
(*p) = (*p)[0:i]
|
||||
}
|
||||
|
||||
// An ErrorList implements the error interface.
|
||||
func (p ErrorList) Error() string {
|
||||
switch len(p) {
|
||||
case 0:
|
||||
return "no errors"
|
||||
case 1:
|
||||
return p[0].Error()
|
||||
}
|
||||
return fmt.Sprintf("%s (and %d more errors)", p[0], len(p)-1)
|
||||
}
|
||||
|
||||
// Err returns an error equivalent to this error list.
|
||||
// If the list is empty, Err returns nil.
|
||||
func (p ErrorList) Err() error {
|
||||
if len(p) == 0 {
|
||||
return nil
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
// PrintError is a utility function that prints a list of errors to w,
|
||||
// one error per line, if the err parameter is an ErrorList. Otherwise
|
||||
// it prints the err string.
|
||||
//
|
||||
func PrintError(w io.Writer, err error) {
|
||||
if list, ok := err.(ErrorList); ok {
|
||||
for _, e := range list {
|
||||
fmt.Fprintf(w, "%s\n", e)
|
||||
}
|
||||
} else if err != nil {
|
||||
fmt.Fprintf(w, "%s\n", err)
|
||||
}
|
||||
}
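
// Illustrative use (a sketch, not part of the original source): errors returned
// by the gcfg reading functions may be an ErrorList, which PrintError expands to
// one line per error. The variable cfg and the file name are hypothetical.
//
//	if err := gcfg.ReadFileInto(&cfg, "app.gcfg"); err != nil {
//		scanner.PrintError(os.Stderr, err)
//	}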
|
|
@ -0,0 +1,46 @@
|
|||
// Copyright 2012 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package scanner_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
)
|
||||
|
||||
import (
|
||||
"github.com/scalingdata/gcfg/scanner"
|
||||
"github.com/scalingdata/gcfg/token"
|
||||
)
|
||||
|
||||
func ExampleScanner_Scan() {
|
||||
// src is the input that we want to tokenize.
|
||||
src := []byte(`[profile "A"]
|
||||
color = blue ; Comment`)
|
||||
|
||||
// Initialize the scanner.
|
||||
var s scanner.Scanner
|
||||
fset := token.NewFileSet() // positions are relative to fset
|
||||
file := fset.AddFile("", fset.Base(), len(src)) // register input "file"
|
||||
s.Init(file, src, nil /* no error handler */, scanner.ScanComments)
|
||||
|
||||
// Repeated calls to Scan yield the token sequence found in the input.
|
||||
for {
|
||||
pos, tok, lit := s.Scan()
|
||||
if tok == token.EOF {
|
||||
break
|
||||
}
|
||||
fmt.Printf("%s\t%q\t%q\n", fset.Position(pos), tok, lit)
|
||||
}
|
||||
|
||||
// output:
|
||||
// 1:1 "[" ""
|
||||
// 1:2 "IDENT" "profile"
|
||||
// 1:10 "STRING" "\"A\""
|
||||
// 1:13 "]" ""
|
||||
// 1:14 "\n" ""
|
||||
// 2:1 "IDENT" "color"
|
||||
// 2:7 "=" ""
|
||||
// 2:9 "STRING" "blue"
|
||||
// 2:14 "COMMENT" "; Comment"
|
||||
}
|
|
@ -0,0 +1,342 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package scanner implements a scanner for gcfg configuration text.
|
||||
// It takes a []byte as source which can then be tokenized
|
||||
// through repeated calls to the Scan method.
|
||||
//
|
||||
// Note that the API for the scanner package may change to accommodate new
|
||||
// features or implementation changes in gcfg.
|
||||
//
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"path/filepath"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
import (
|
||||
"github.com/scalingdata/gcfg/token"
|
||||
)
|
||||
|
||||
// An ErrorHandler may be provided to Scanner.Init. If a syntax error is
|
||||
// encountered and a handler was installed, the handler is called with a
|
||||
// position and an error message. The position points to the beginning of
|
||||
// the offending token.
|
||||
//
|
||||
type ErrorHandler func(pos token.Position, msg string)
|
||||
|
||||
// A Scanner holds the scanner's internal state while processing
|
||||
// a given text. It can be allocated as part of another data
|
||||
// structure but must be initialized via Init before use.
|
||||
//
|
||||
type Scanner struct {
|
||||
// immutable state
|
||||
file *token.File // source file handle
|
||||
dir string // directory portion of file.Name()
|
||||
src []byte // source
|
||||
err ErrorHandler // error reporting; or nil
|
||||
mode Mode // scanning mode
|
||||
|
||||
// scanning state
|
||||
ch rune // current character
|
||||
offset int // character offset
|
||||
rdOffset int // reading offset (position after current character)
|
||||
lineOffset int // current line offset
|
||||
nextVal bool // next token is expected to be a value
|
||||
|
||||
// public state - ok to modify
|
||||
ErrorCount int // number of errors encountered
|
||||
}
|
||||
|
||||
// Read the next Unicode char into s.ch.
|
||||
// s.ch < 0 means end-of-file.
|
||||
//
|
||||
func (s *Scanner) next() {
|
||||
if s.rdOffset < len(s.src) {
|
||||
s.offset = s.rdOffset
|
||||
if s.ch == '\n' {
|
||||
s.lineOffset = s.offset
|
||||
s.file.AddLine(s.offset)
|
||||
}
|
||||
r, w := rune(s.src[s.rdOffset]), 1
|
||||
switch {
|
||||
case r == 0:
|
||||
s.error(s.offset, "illegal character NUL")
|
||||
case r >= 0x80:
|
||||
// not ASCII
|
||||
r, w = utf8.DecodeRune(s.src[s.rdOffset:])
|
||||
if r == utf8.RuneError && w == 1 {
|
||||
s.error(s.offset, "illegal UTF-8 encoding")
|
||||
}
|
||||
}
|
||||
s.rdOffset += w
|
||||
s.ch = r
|
||||
} else {
|
||||
s.offset = len(s.src)
|
||||
if s.ch == '\n' {
|
||||
s.lineOffset = s.offset
|
||||
s.file.AddLine(s.offset)
|
||||
}
|
||||
s.ch = -1 // eof
|
||||
}
|
||||
}
|
||||
|
||||
// A mode value is a set of flags (or 0).
|
||||
// They control scanner behavior.
|
||||
//
|
||||
type Mode uint
|
||||
|
||||
const (
|
||||
ScanComments Mode = 1 << iota // return comments as COMMENT tokens
|
||||
)
|
||||
|
||||
// Init prepares the scanner s to tokenize the text src by setting the
|
||||
// scanner at the beginning of src. The scanner uses the file set file
|
||||
// for position information and it adds line information for each line.
|
||||
// It is ok to re-use the same file when re-scanning the same file as
|
||||
// line information which is already present is ignored. Init causes a
|
||||
// panic if the file size does not match the src size.
|
||||
//
|
||||
// Calls to Scan will invoke the error handler err if they encounter a
|
||||
// syntax error and err is not nil. Also, for each error encountered,
|
||||
// the Scanner field ErrorCount is incremented by one. The mode parameter
|
||||
// determines how comments are handled.
|
||||
//
|
||||
// Note that Init may call err if there is an error in the first character
|
||||
// of the file.
|
||||
//
|
||||
func (s *Scanner) Init(file *token.File, src []byte, err ErrorHandler, mode Mode) {
|
||||
// Explicitly initialize all fields since a scanner may be reused.
|
||||
if file.Size() != len(src) {
|
||||
panic(fmt.Sprintf("file size (%d) does not match src len (%d)", file.Size(), len(src)))
|
||||
}
|
||||
s.file = file
|
||||
s.dir, _ = filepath.Split(file.Name())
|
||||
s.src = src
|
||||
s.err = err
|
||||
s.mode = mode
|
||||
|
||||
s.ch = ' '
|
||||
s.offset = 0
|
||||
s.rdOffset = 0
|
||||
s.lineOffset = 0
|
||||
s.ErrorCount = 0
|
||||
s.nextVal = false
|
||||
|
||||
s.next()
|
||||
}
|
||||
|
||||
func (s *Scanner) error(offs int, msg string) {
|
||||
if s.err != nil {
|
||||
s.err(s.file.Position(s.file.Pos(offs)), msg)
|
||||
}
|
||||
s.ErrorCount++
|
||||
}
|
||||
|
||||
func (s *Scanner) scanComment() string {
|
||||
// initial [;#] already consumed
|
||||
offs := s.offset - 1 // position of initial [;#]
|
||||
|
||||
for s.ch != '\n' && s.ch >= 0 {
|
||||
s.next()
|
||||
}
|
||||
return string(s.src[offs:s.offset])
|
||||
}
|
||||
|
||||
func isLetter(ch rune) bool {
|
||||
return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch >= 0x80 && unicode.IsLetter(ch)
|
||||
}
|
||||
|
||||
func isDigit(ch rune) bool {
|
||||
return '0' <= ch && ch <= '9' || ch >= 0x80 && unicode.IsDigit(ch)
|
||||
}
|
||||
|
||||
func (s *Scanner) scanIdentifier() string {
|
||||
offs := s.offset
|
||||
for isLetter(s.ch) || isDigit(s.ch) || s.ch == '-' {
|
||||
s.next()
|
||||
}
|
||||
return string(s.src[offs:s.offset])
|
||||
}
|
||||
|
||||
func (s *Scanner) scanEscape(val bool) {
|
||||
offs := s.offset
|
||||
ch := s.ch
|
||||
s.next() // always make progress
|
||||
switch ch {
|
||||
case '\\', '"':
|
||||
// ok
|
||||
case 'n', 't':
|
||||
if val {
|
||||
break // ok
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
s.error(offs, "unknown escape sequence")
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Scanner) scanString() string {
|
||||
// '"' opening already consumed
|
||||
offs := s.offset - 1
|
||||
|
||||
for s.ch != '"' {
|
||||
ch := s.ch
|
||||
s.next()
|
||||
if ch == '\n' || ch < 0 {
|
||||
s.error(offs, "string not terminated")
|
||||
break
|
||||
}
|
||||
if ch == '\\' {
|
||||
s.scanEscape(false)
|
||||
}
|
||||
}
|
||||
|
||||
s.next()
|
||||
|
||||
return string(s.src[offs:s.offset])
|
||||
}
|
||||
|
||||
func stripCR(b []byte) []byte {
|
||||
c := make([]byte, len(b))
|
||||
i := 0
|
||||
for _, ch := range b {
|
||||
if ch != '\r' {
|
||||
c[i] = ch
|
||||
i++
|
||||
}
|
||||
}
|
||||
return c[:i]
|
||||
}
|
||||
|
||||
func (s *Scanner) scanValString() string {
|
||||
offs := s.offset
|
||||
|
||||
hasCR := false
|
||||
end := offs
|
||||
inQuote := false
|
||||
loop:
|
||||
for inQuote || s.ch >= 0 && s.ch != '\n' && s.ch != ';' && s.ch != '#' {
|
||||
ch := s.ch
|
||||
s.next()
|
||||
switch {
|
||||
case inQuote && ch == '\\':
|
||||
s.scanEscape(true)
|
||||
case !inQuote && ch == '\\':
|
||||
if s.ch == '\r' {
|
||||
hasCR = true
|
||||
s.next()
|
||||
}
|
||||
if s.ch != '\n' {
|
||||
s.error(offs, "unquoted '\\' must be followed by new line")
|
||||
break loop
|
||||
}
|
||||
s.next()
|
||||
case ch == '"':
|
||||
inQuote = !inQuote
|
||||
case ch == '\r':
|
||||
hasCR = true
|
||||
case ch < 0 || inQuote && ch == '\n':
|
||||
s.error(offs, "string not terminated")
|
||||
break loop
|
||||
}
|
||||
if inQuote || !isWhiteSpace(ch) {
|
||||
end = s.offset
|
||||
}
|
||||
}
|
||||
|
||||
lit := s.src[offs:end]
|
||||
if hasCR {
|
||||
lit = stripCR(lit)
|
||||
}
|
||||
|
||||
return string(lit)
|
||||
}
|
||||
|
||||
func isWhiteSpace(ch rune) bool {
|
||||
return ch == ' ' || ch == '\t' || ch == '\r'
|
||||
}
|
||||
|
||||
func (s *Scanner) skipWhitespace() {
|
||||
for isWhiteSpace(s.ch) {
|
||||
s.next()
|
||||
}
|
||||
}
|
||||
|
||||
// Scan scans the next token and returns the token position, the token,
|
||||
// and its literal string if applicable. The source end is indicated by
|
||||
// token.EOF.
|
||||
//
|
||||
// If the returned token is a literal (token.IDENT, token.STRING) or
|
||||
// token.COMMENT, the literal string has the corresponding value.
|
||||
//
|
||||
// If the returned token is token.ILLEGAL, the literal string is the
|
||||
// offending character.
|
||||
//
|
||||
// In all other cases, Scan returns an empty literal string.
|
||||
//
|
||||
// For more tolerant parsing, Scan will return a valid token if
|
||||
// possible even if a syntax error was encountered. Thus, even
|
||||
// if the resulting token sequence contains no illegal tokens,
|
||||
// a client may not assume that no error occurred. Instead it
|
||||
// must check the scanner's ErrorCount or the number of calls
|
||||
// of the error handler, if there was one installed.
|
||||
//
|
||||
// Scan adds line information to the file added to the file
|
||||
// set with Init. Token positions are relative to that file
|
||||
// and thus relative to the file set.
|
||||
//
|
||||
func (s *Scanner) Scan() (pos token.Pos, tok token.Token, lit string) {
|
||||
scanAgain:
|
||||
s.skipWhitespace()
|
||||
|
||||
// current token start
|
||||
pos = s.file.Pos(s.offset)
|
||||
|
||||
// determine token value
|
||||
switch ch := s.ch; {
|
||||
case s.nextVal:
|
||||
lit = s.scanValString()
|
||||
tok = token.STRING
|
||||
s.nextVal = false
|
||||
case isLetter(ch):
|
||||
lit = s.scanIdentifier()
|
||||
tok = token.IDENT
|
||||
default:
|
||||
s.next() // always make progress
|
||||
switch ch {
|
||||
case -1:
|
||||
tok = token.EOF
|
||||
case '\n':
|
||||
tok = token.EOL
|
||||
case '"':
|
||||
tok = token.STRING
|
||||
lit = s.scanString()
|
||||
case '[':
|
||||
tok = token.LBRACK
|
||||
case ']':
|
||||
tok = token.RBRACK
|
||||
case ';', '#':
|
||||
// comment
|
||||
lit = s.scanComment()
|
||||
if s.mode&ScanComments == 0 {
|
||||
// skip comment
|
||||
goto scanAgain
|
||||
}
|
||||
tok = token.COMMENT
|
||||
case '=':
|
||||
tok = token.ASSIGN
|
||||
s.nextVal = true
|
||||
default:
|
||||
s.error(s.file.Offset(pos), fmt.Sprintf("illegal character %#U", ch))
|
||||
tok = token.ILLEGAL
|
||||
lit = string(ch)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,417 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package scanner
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
import (
|
||||
"github.com/scalingdata/gcfg/token"
|
||||
)
|
||||
|
||||
var fset = token.NewFileSet()
|
||||
|
||||
const /* class */ (
|
||||
special = iota
|
||||
literal
|
||||
operator
|
||||
)
|
||||
|
||||
func tokenclass(tok token.Token) int {
|
||||
switch {
|
||||
case tok.IsLiteral():
|
||||
return literal
|
||||
case tok.IsOperator():
|
||||
return operator
|
||||
}
|
||||
return special
|
||||
}
|
||||
|
||||
type elt struct {
|
||||
tok token.Token
|
||||
lit string
|
||||
class int
|
||||
pre string
|
||||
suf string
|
||||
}
|
||||
|
||||
var tokens = [...]elt{
|
||||
// Special tokens
|
||||
{token.COMMENT, "; a comment", special, "", "\n"},
|
||||
{token.COMMENT, "# a comment", special, "", "\n"},
|
||||
|
||||
// Operators and delimiters
|
||||
{token.ASSIGN, "=", operator, "", "value"},
|
||||
{token.LBRACK, "[", operator, "", ""},
|
||||
{token.RBRACK, "]", operator, "", ""},
|
||||
{token.EOL, "\n", operator, "", ""},
|
||||
|
||||
// Identifiers
|
||||
{token.IDENT, "foobar", literal, "", ""},
|
||||
{token.IDENT, "a۰۱۸", literal, "", ""},
|
||||
{token.IDENT, "foo६४", literal, "", ""},
|
||||
{token.IDENT, "bar9876", literal, "", ""},
|
||||
{token.IDENT, "foo-bar", literal, "", ""},
|
||||
{token.IDENT, "foo", literal, ";\n", ""},
|
||||
// String literals (subsection names)
|
||||
{token.STRING, `"foobar"`, literal, "", ""},
|
||||
{token.STRING, `"\""`, literal, "", ""},
|
||||
// String literals (values)
|
||||
{token.STRING, `"\n"`, literal, "=", ""},
|
||||
{token.STRING, `"foobar"`, literal, "=", ""},
|
||||
{token.STRING, `"foo\nbar"`, literal, "=", ""},
|
||||
{token.STRING, `"foo\"bar"`, literal, "=", ""},
|
||||
{token.STRING, `"foo\\bar"`, literal, "=", ""},
|
||||
{token.STRING, `"foobar"`, literal, "=", ""},
|
||||
{token.STRING, `"foobar"`, literal, "= ", ""},
|
||||
{token.STRING, `"foobar"`, literal, "=", "\n"},
|
||||
{token.STRING, `"foobar"`, literal, "=", ";"},
|
||||
{token.STRING, `"foobar"`, literal, "=", " ;"},
|
||||
{token.STRING, `"foobar"`, literal, "=", "#"},
|
||||
{token.STRING, `"foobar"`, literal, "=", " #"},
|
||||
{token.STRING, "foobar", literal, "=", ""},
|
||||
{token.STRING, "foobar", literal, "= ", ""},
|
||||
{token.STRING, "foobar", literal, "=", " "},
|
||||
{token.STRING, `"foo" "bar"`, literal, "=", " "},
|
||||
{token.STRING, "foo\\\nbar", literal, "=", ""},
|
||||
{token.STRING, "foo\\\r\nbar", literal, "=", ""},
|
||||
}
|
||||
|
||||
const whitespace = " \t \n\n\n" // to separate tokens
|
||||
|
||||
var source = func() []byte {
|
||||
var src []byte
|
||||
for _, t := range tokens {
|
||||
src = append(src, t.pre...)
|
||||
src = append(src, t.lit...)
|
||||
src = append(src, t.suf...)
|
||||
src = append(src, whitespace...)
|
||||
}
|
||||
return src
|
||||
}()
|
||||
|
||||
func newlineCount(s string) int {
|
||||
n := 0
|
||||
for i := 0; i < len(s); i++ {
|
||||
if s[i] == '\n' {
|
||||
n++
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func checkPos(t *testing.T, lit string, p token.Pos, expected token.Position) {
|
||||
pos := fset.Position(p)
|
||||
if pos.Filename != expected.Filename {
|
||||
t.Errorf("bad filename for %q: got %s, expected %s", lit, pos.Filename, expected.Filename)
|
||||
}
|
||||
if pos.Offset != expected.Offset {
|
||||
t.Errorf("bad position for %q: got %d, expected %d", lit, pos.Offset, expected.Offset)
|
||||
}
|
||||
if pos.Line != expected.Line {
|
||||
t.Errorf("bad line for %q: got %d, expected %d", lit, pos.Line, expected.Line)
|
||||
}
|
||||
if pos.Column != expected.Column {
|
||||
t.Errorf("bad column for %q: got %d, expected %d", lit, pos.Column, expected.Column)
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that calling Scan() provides the correct results.
|
||||
func TestScan(t *testing.T) {
|
||||
// make source
|
||||
src_linecount := newlineCount(string(source))
|
||||
whitespace_linecount := newlineCount(whitespace)
|
||||
|
||||
index := 0
|
||||
|
||||
// error handler
|
||||
eh := func(_ token.Position, msg string) {
|
||||
t.Errorf("%d: error handler called (msg = %s)", index, msg)
|
||||
}
|
||||
|
||||
// verify scan
|
||||
var s Scanner
|
||||
s.Init(fset.AddFile("", fset.Base(), len(source)), source, eh, ScanComments)
|
||||
// epos is the expected position
|
||||
epos := token.Position{
|
||||
Filename: "",
|
||||
Offset: 0,
|
||||
Line: 1,
|
||||
Column: 1,
|
||||
}
|
||||
for {
|
||||
pos, tok, lit := s.Scan()
|
||||
if lit == "" {
|
||||
// no literal value for non-literal tokens
|
||||
lit = tok.String()
|
||||
}
|
||||
e := elt{token.EOF, "", special, "", ""}
|
||||
if index < len(tokens) {
|
||||
e = tokens[index]
|
||||
}
|
||||
if tok == token.EOF {
|
||||
lit = "<EOF>"
|
||||
epos.Line = src_linecount
|
||||
epos.Column = 2
|
||||
}
|
||||
if e.pre != "" && strings.ContainsRune("=;#", rune(e.pre[0])) {
|
||||
epos.Column = 1
|
||||
checkPos(t, lit, pos, epos)
|
||||
var etok token.Token
|
||||
if e.pre[0] == '=' {
|
||||
etok = token.ASSIGN
|
||||
} else {
|
||||
etok = token.COMMENT
|
||||
}
|
||||
if tok != etok {
|
||||
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, etok)
|
||||
}
|
||||
pos, tok, lit = s.Scan()
|
||||
}
|
||||
epos.Offset += len(e.pre)
|
||||
if tok != token.EOF {
|
||||
epos.Column = 1 + len(e.pre)
|
||||
}
|
||||
if e.pre != "" && e.pre[len(e.pre)-1] == '\n' {
|
||||
epos.Offset--
|
||||
epos.Column--
|
||||
checkPos(t, lit, pos, epos)
|
||||
if tok != token.EOL {
|
||||
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.EOL)
|
||||
}
|
||||
epos.Line++
|
||||
epos.Offset++
|
||||
epos.Column = 1
|
||||
pos, tok, lit = s.Scan()
|
||||
}
|
||||
checkPos(t, lit, pos, epos)
|
||||
if tok != e.tok {
|
||||
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, e.tok)
|
||||
}
|
||||
if e.tok.IsLiteral() {
|
||||
// no CRs in value string literals
|
||||
elit := e.lit
|
||||
if strings.ContainsRune(e.pre, '=') {
|
||||
elit = string(stripCR([]byte(elit)))
|
||||
epos.Offset += len(e.lit) - len(lit) // correct position
|
||||
}
|
||||
if lit != elit {
|
||||
t.Errorf("bad literal for %q: got %q, expected %q", lit, lit, elit)
|
||||
}
|
||||
}
|
||||
if tokenclass(tok) != e.class {
|
||||
t.Errorf("bad class for %q: got %d, expected %d", lit, tokenclass(tok), e.class)
|
||||
}
|
||||
epos.Offset += len(lit) + len(e.suf) + len(whitespace)
|
||||
epos.Line += newlineCount(lit) + newlineCount(e.suf) + whitespace_linecount
|
||||
index++
|
||||
if tok == token.EOF {
|
||||
break
|
||||
}
|
||||
if e.suf == "value" {
|
||||
pos, tok, lit = s.Scan()
|
||||
if tok != token.STRING {
|
||||
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.STRING)
|
||||
}
|
||||
} else if strings.ContainsRune(e.suf, ';') || strings.ContainsRune(e.suf, '#') {
|
||||
pos, tok, lit = s.Scan()
|
||||
if tok != token.COMMENT {
|
||||
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.COMMENT)
|
||||
}
|
||||
}
|
||||
// skip EOLs
|
||||
for i := 0; i < whitespace_linecount+newlineCount(e.suf); i++ {
|
||||
pos, tok, lit = s.Scan()
|
||||
if tok != token.EOL {
|
||||
t.Errorf("bad token for %q: got %q, expected %q", lit, tok, token.EOL)
|
||||
}
|
||||
}
|
||||
}
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("found %d errors", s.ErrorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestScanValStringEOF(t *testing.T) {
|
||||
var s Scanner
|
||||
src := "= value"
|
||||
f := fset.AddFile("src", fset.Base(), len(src))
|
||||
s.Init(f, []byte(src), nil, 0)
|
||||
s.Scan() // =
|
||||
s.Scan() // value
|
||||
_, tok, _ := s.Scan() // EOF
|
||||
if tok != token.EOF {
|
||||
t.Errorf("bad token: got %s, expected %s", tok, token.EOF)
|
||||
}
|
||||
if s.ErrorCount > 0 {
|
||||
t.Error("scanning error")
|
||||
}
|
||||
}
|
||||
|
||||
// Verify that initializing the same scanner more than once works correctly.
|
||||
func TestInit(t *testing.T) {
|
||||
var s Scanner
|
||||
|
||||
// 1st init
|
||||
src1 := "\nname = value"
|
||||
f1 := fset.AddFile("src1", fset.Base(), len(src1))
|
||||
s.Init(f1, []byte(src1), nil, 0)
|
||||
if f1.Size() != len(src1) {
|
||||
t.Errorf("bad file size: got %d, expected %d", f1.Size(), len(src1))
|
||||
}
|
||||
s.Scan() // \n
|
||||
s.Scan() // name
|
||||
_, tok, _ := s.Scan() // =
|
||||
if tok != token.ASSIGN {
|
||||
t.Errorf("bad token: got %s, expected %s", tok, token.ASSIGN)
|
||||
}
|
||||
|
||||
// 2nd init
|
||||
src2 := "[section]"
|
||||
f2 := fset.AddFile("src2", fset.Base(), len(src2))
|
||||
s.Init(f2, []byte(src2), nil, 0)
|
||||
if f2.Size() != len(src2) {
|
||||
t.Errorf("bad file size: got %d, expected %d", f2.Size(), len(src2))
|
||||
}
|
||||
_, tok, _ = s.Scan() // [
|
||||
if tok != token.LBRACK {
|
||||
t.Errorf("bad token: got %s, expected %s", tok, token.LBRACK)
|
||||
}
|
||||
|
||||
if s.ErrorCount != 0 {
|
||||
t.Errorf("found %d errors", s.ErrorCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStdErrorHandler(t *testing.T) {
|
||||
const src = "@\n" + // illegal character, causes an error
|
||||
"@ @\n" // two errors on the same line
|
||||
|
||||
var list ErrorList
|
||||
eh := func(pos token.Position, msg string) { list.Add(pos, msg) }
|
||||
|
||||
var s Scanner
|
||||
s.Init(fset.AddFile("File1", fset.Base(), len(src)), []byte(src), eh, 0)
|
||||
for {
|
||||
if _, tok, _ := s.Scan(); tok == token.EOF {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if len(list) != s.ErrorCount {
|
||||
t.Errorf("found %d errors, expected %d", len(list), s.ErrorCount)
|
||||
}
|
||||
|
||||
if len(list) != 3 {
|
||||
t.Errorf("found %d raw errors, expected 3", len(list))
|
||||
PrintError(os.Stderr, list)
|
||||
}
|
||||
|
||||
list.Sort()
|
||||
if len(list) != 3 {
|
||||
t.Errorf("found %d sorted errors, expected 3", len(list))
|
||||
PrintError(os.Stderr, list)
|
||||
}
|
||||
|
||||
list.RemoveMultiples()
|
||||
if len(list) != 2 {
|
||||
t.Errorf("found %d one-per-line errors, expected 2", len(list))
|
||||
PrintError(os.Stderr, list)
|
||||
}
|
||||
}
|
||||
|
||||
type errorCollector struct {
|
||||
cnt int // number of errors encountered
|
||||
msg string // last error message encountered
|
||||
pos token.Position // last error position encountered
|
||||
}
|
||||
|
||||
func checkError(t *testing.T, src string, tok token.Token, pos int, err string) {
|
||||
var s Scanner
|
||||
var h errorCollector
|
||||
eh := func(pos token.Position, msg string) {
|
||||
h.cnt++
|
||||
h.msg = msg
|
||||
h.pos = pos
|
||||
}
|
||||
s.Init(fset.AddFile("", fset.Base(), len(src)), []byte(src), eh, ScanComments)
|
||||
if src[0] == '=' {
|
||||
_, _, _ = s.Scan()
|
||||
}
|
||||
_, tok0, _ := s.Scan()
|
||||
_, tok1, _ := s.Scan()
|
||||
if tok0 != tok {
|
||||
t.Errorf("%q: got %s, expected %s", src, tok0, tok)
|
||||
}
|
||||
if tok1 != token.EOF {
|
||||
t.Errorf("%q: got %s, expected EOF", src, tok1)
|
||||
}
|
||||
cnt := 0
|
||||
if err != "" {
|
||||
cnt = 1
|
||||
}
|
||||
if h.cnt != cnt {
|
||||
t.Errorf("%q: got cnt %d, expected %d", src, h.cnt, cnt)
|
||||
}
|
||||
if h.msg != err {
|
||||
t.Errorf("%q: got msg %q, expected %q", src, h.msg, err)
|
||||
}
|
||||
if h.pos.Offset != pos {
|
||||
t.Errorf("%q: got offset %d, expected %d", src, h.pos.Offset, pos)
|
||||
}
|
||||
}
|
||||
|
||||
var errors = []struct {
|
||||
src string
|
||||
tok token.Token
|
||||
pos int
|
||||
err string
|
||||
}{
|
||||
{"\a", token.ILLEGAL, 0, "illegal character U+0007"},
|
||||
{"/", token.ILLEGAL, 0, "illegal character U+002F '/'"},
|
||||
{"_", token.ILLEGAL, 0, "illegal character U+005F '_'"},
|
||||
{`…`, token.ILLEGAL, 0, "illegal character U+2026 '…'"},
|
||||
{`""`, token.STRING, 0, ""},
|
||||
{`"`, token.STRING, 0, "string not terminated"},
|
||||
{"\"\n", token.STRING, 0, "string not terminated"},
|
||||
{`="`, token.STRING, 1, "string not terminated"},
|
||||
{"=\"\n", token.STRING, 1, "string not terminated"},
|
||||
{"=\\", token.STRING, 1, "unquoted '\\' must be followed by new line"},
|
||||
{"=\\\r", token.STRING, 1, "unquoted '\\' must be followed by new line"},
|
||||
{`"\z"`, token.STRING, 2, "unknown escape sequence"},
|
||||
{`"\a"`, token.STRING, 2, "unknown escape sequence"},
|
||||
{`"\b"`, token.STRING, 2, "unknown escape sequence"},
|
||||
{`"\f"`, token.STRING, 2, "unknown escape sequence"},
|
||||
{`"\r"`, token.STRING, 2, "unknown escape sequence"},
|
||||
{`"\t"`, token.STRING, 2, "unknown escape sequence"},
|
||||
{`"\v"`, token.STRING, 2, "unknown escape sequence"},
|
||||
{`"\0"`, token.STRING, 2, "unknown escape sequence"},
|
||||
}
|
||||
|
||||
func TestScanErrors(t *testing.T) {
|
||||
for _, e := range errors {
|
||||
checkError(t, e.src, e.tok, e.pos, e.err)
|
||||
}
|
||||
}
|
||||
|
||||
func BenchmarkScan(b *testing.B) {
|
||||
b.StopTimer()
|
||||
fset := token.NewFileSet()
|
||||
file := fset.AddFile("", fset.Base(), len(source))
|
||||
var s Scanner
|
||||
b.StartTimer()
|
||||
for i := b.N - 1; i >= 0; i-- {
|
||||
s.Init(file, source, nil, ScanComments)
|
||||
for {
|
||||
_, tok, _ := s.Scan()
|
||||
if tok == token.EOF {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,281 @@
|
|||
package gcfg
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
"reflect"
|
||||
"strings"
|
||||
"unicode"
|
||||
"unicode/utf8"
|
||||
|
||||
"github.com/scalingdata/gcfg/types"
|
||||
)
|
||||
|
||||
type tag struct {
|
||||
ident string
|
||||
intMode string
|
||||
}
|
||||
|
||||
func newTag(ts string) tag {
|
||||
t := tag{}
|
||||
s := strings.Split(ts, ",")
|
||||
t.ident = s[0]
|
||||
for _, tse := range s[1:] {
|
||||
if strings.HasPrefix(tse, "int=") {
|
||||
t.intMode = tse[len("int="):]
|
||||
}
|
||||
}
|
||||
return t
|
||||
}
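
// Tag syntax sketch (illustrative, not part of the original source; the field
// names are hypothetical): a gcfg struct tag has the form "ident[,int=modes]",
// mirroring the cases exercised by the read tests in this change.
//
//	type config struct {
//		TagName struct {                // matched by the section header [tag-name]
//			Count int `gcfg:",int=dho"` // accept decimal, hex, and octal input
//		} `gcfg:"tag-name"`
//	}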
|
||||
|
||||
func fieldFold(v reflect.Value, name string) (reflect.Value, tag) {
|
||||
var n string
|
||||
r0, _ := utf8.DecodeRuneInString(name)
|
||||
if unicode.IsLetter(r0) && !unicode.IsLower(r0) && !unicode.IsUpper(r0) {
|
||||
n = "X"
|
||||
}
|
||||
n += strings.Replace(name, "-", "_", -1)
|
||||
f, ok := v.Type().FieldByNameFunc(func(fieldName string) bool {
|
||||
if !v.FieldByName(fieldName).CanSet() {
|
||||
return false
|
||||
}
|
||||
f, _ := v.Type().FieldByName(fieldName)
|
||||
t := newTag(f.Tag.Get("gcfg"))
|
||||
if t.ident != "" {
|
||||
return strings.EqualFold(t.ident, name)
|
||||
}
|
||||
return strings.EqualFold(n, fieldName)
|
||||
})
|
||||
if !ok {
|
||||
return reflect.Value{}, tag{}
|
||||
}
|
||||
return v.FieldByName(f.Name), newTag(f.Tag.Get("gcfg"))
|
||||
}
|
||||
|
||||
type setter func(destp interface{}, blank bool, val string, t tag) error
|
||||
|
||||
var errUnsupportedType = fmt.Errorf("unsupported type")
|
||||
var errBlankUnsupported = fmt.Errorf("blank value not supported for type")
|
||||
|
||||
var setters = []setter{
|
||||
typeSetter, textUnmarshalerSetter, kindSetter, scanSetter,
|
||||
}
|
||||
|
||||
func textUnmarshalerSetter(d interface{}, blank bool, val string, t tag) error {
|
||||
dtu, ok := d.(textUnmarshaler)
|
||||
if !ok {
|
||||
return errUnsupportedType
|
||||
}
|
||||
if blank {
|
||||
return errBlankUnsupported
|
||||
}
|
||||
return dtu.UnmarshalText([]byte(val))
|
||||
}
|
||||
|
||||
func boolSetter(d interface{}, blank bool, val string, t tag) error {
|
||||
if blank {
|
||||
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(true))
|
||||
return nil
|
||||
}
|
||||
b, err := types.ParseBool(val)
|
||||
if err == nil {
|
||||
reflect.ValueOf(d).Elem().Set(reflect.ValueOf(b))
|
||||
}
|
||||
return err
|
||||
}
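
// As exercised by the read tests in this change, types.ParseBool accepts
// "true"/"yes"/"on"/"1" and "false"/"no"/"off"/"0" case-insensitively, and a
// variable given with no value at all ("blank") is treated as true.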
|
||||
|
||||
func intMode(mode string) types.IntMode {
|
||||
var m types.IntMode
|
||||
if strings.ContainsAny(mode, "dD") {
|
||||
m |= types.Dec
|
||||
}
|
||||
if strings.ContainsAny(mode, "hH") {
|
||||
m |= types.Hex
|
||||
}
|
||||
if strings.ContainsAny(mode, "oO") {
|
||||
m |= types.Oct
|
||||
}
|
||||
return m
|
||||
}
|
||||
|
||||
var typeModes = map[reflect.Type]types.IntMode{
|
||||
reflect.TypeOf(int(0)): types.Dec | types.Hex,
|
||||
reflect.TypeOf(int8(0)): types.Dec | types.Hex,
|
||||
reflect.TypeOf(int16(0)): types.Dec | types.Hex,
|
||||
reflect.TypeOf(int32(0)): types.Dec | types.Hex,
|
||||
reflect.TypeOf(int64(0)): types.Dec | types.Hex,
|
||||
reflect.TypeOf(uint(0)): types.Dec | types.Hex,
|
||||
reflect.TypeOf(uint8(0)): types.Dec | types.Hex,
|
||||
reflect.TypeOf(uint16(0)): types.Dec | types.Hex,
|
||||
reflect.TypeOf(uint32(0)): types.Dec | types.Hex,
|
||||
reflect.TypeOf(uint64(0)): types.Dec | types.Hex,
|
||||
// use default mode (allow dec/hex/oct) for uintptr type
|
||||
reflect.TypeOf(big.Int{}): types.Dec | types.Hex,
|
||||
}
|
||||
|
||||
func intModeDefault(t reflect.Type) types.IntMode {
|
||||
m, ok := typeModes[t]
|
||||
if !ok {
|
||||
m = types.Dec | types.Hex | types.Oct
|
||||
}
|
||||
return m
|
||||
}
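
// Worked example (based on the read tests in this change, not on additional
// documentation): with the default mode for plain int fields, "010" parses as
// decimal 10 and "0x10" as 16; a field tagged `gcfg:",int=dho"` also accepts
// octal, so "010" parses as 8. Named integer types such as os.FileMode fall
// back to dec|hex|oct, so "0777" parses as the octal file mode.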
|
||||
|
||||
func intSetter(d interface{}, blank bool, val string, t tag) error {
|
||||
if blank {
|
||||
return errBlankUnsupported
|
||||
}
|
||||
mode := intMode(t.intMode)
|
||||
if mode == 0 {
|
||||
mode = intModeDefault(reflect.TypeOf(d).Elem())
|
||||
}
|
||||
return types.ParseInt(d, val, mode)
|
||||
}
|
||||
|
||||
func stringSetter(d interface{}, blank bool, val string, t tag) error {
|
||||
if blank {
|
||||
return errBlankUnsupported
|
||||
}
|
||||
dsp, ok := d.(*string)
|
||||
if !ok {
|
||||
return errUnsupportedType
|
||||
}
|
||||
*dsp = val
|
||||
return nil
|
||||
}
|
||||
|
||||
var kindSetters = map[reflect.Kind]setter{
|
||||
reflect.String: stringSetter,
|
||||
reflect.Bool: boolSetter,
|
||||
reflect.Int: intSetter,
|
||||
reflect.Int8: intSetter,
|
||||
reflect.Int16: intSetter,
|
||||
reflect.Int32: intSetter,
|
||||
reflect.Int64: intSetter,
|
||||
reflect.Uint: intSetter,
|
||||
reflect.Uint8: intSetter,
|
||||
reflect.Uint16: intSetter,
|
||||
reflect.Uint32: intSetter,
|
||||
reflect.Uint64: intSetter,
|
||||
reflect.Uintptr: intSetter,
|
||||
}
|
||||
|
||||
var typeSetters = map[reflect.Type]setter{
|
||||
reflect.TypeOf(big.Int{}): intSetter,
|
||||
}
|
||||
|
||||
func typeSetter(d interface{}, blank bool, val string, tt tag) error {
|
||||
t := reflect.ValueOf(d).Type().Elem()
|
||||
setter, ok := typeSetters[t]
|
||||
if !ok {
|
||||
return errUnsupportedType
|
||||
}
|
||||
return setter(d, blank, val, tt)
|
||||
}
|
||||
|
||||
func kindSetter(d interface{}, blank bool, val string, tt tag) error {
|
||||
k := reflect.ValueOf(d).Type().Elem().Kind()
|
||||
setter, ok := kindSetters[k]
|
||||
if !ok {
|
||||
return errUnsupportedType
|
||||
}
|
||||
return setter(d, blank, val, tt)
|
||||
}
|
||||
|
||||
func scanSetter(d interface{}, blank bool, val string, tt tag) error {
|
||||
if blank {
|
||||
return errBlankUnsupported
|
||||
}
|
||||
return types.ScanFully(d, val, 'v')
|
||||
}
|
||||
|
||||
func set(cfg interface{}, sect, sub, name string, blank bool, value string) error {
|
||||
vPCfg := reflect.ValueOf(cfg)
|
||||
if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {
|
||||
panic(fmt.Errorf("config must be a pointer to a struct"))
|
||||
}
|
||||
vCfg := vPCfg.Elem()
|
||||
vSect, _ := fieldFold(vCfg, sect)
|
||||
if !vSect.IsValid() {
|
||||
return fmt.Errorf("invalid section: section %q", sect)
|
||||
}
|
||||
if vSect.Kind() == reflect.Map {
|
||||
vst := vSect.Type()
|
||||
if vst.Key().Kind() != reflect.String ||
|
||||
vst.Elem().Kind() != reflect.Ptr ||
|
||||
vst.Elem().Elem().Kind() != reflect.Struct {
|
||||
panic(fmt.Errorf("map field for section must have string keys and "+
|
||||
" pointer-to-struct values: section %q", sect))
|
||||
}
|
||||
if vSect.IsNil() {
|
||||
vSect.Set(reflect.MakeMap(vst))
|
||||
}
|
||||
k := reflect.ValueOf(sub)
|
||||
pv := vSect.MapIndex(k)
|
||||
if !pv.IsValid() {
|
||||
vType := vSect.Type().Elem().Elem()
|
||||
pv = reflect.New(vType)
|
||||
vSect.SetMapIndex(k, pv)
|
||||
}
|
||||
vSect = pv.Elem()
|
||||
} else if vSect.Kind() != reflect.Struct {
|
||||
panic(fmt.Errorf("field for section must be a map or a struct: "+
|
||||
"section %q", sect))
|
||||
} else if sub != "" {
|
||||
return fmt.Errorf("invalid subsection: "+
|
||||
"section %q subsection %q", sect, sub)
|
||||
}
|
||||
vVar, t := fieldFold(vSect, name)
|
||||
if !vVar.IsValid() {
|
||||
return fmt.Errorf("invalid variable: "+
|
||||
"section %q subsection %q variable %q", sect, sub, name)
|
||||
}
|
||||
// vVal is either single-valued var, or newly allocated value within multi-valued var
|
||||
var vVal reflect.Value
|
||||
// multi-value if unnamed slice type
|
||||
isMulti := vVar.Type().Name() == "" && vVar.Kind() == reflect.Slice
|
||||
if isMulti && blank {
|
||||
vVar.Set(reflect.Zero(vVar.Type()))
|
||||
return nil
|
||||
}
|
||||
if isMulti {
|
||||
vVal = reflect.New(vVar.Type().Elem()).Elem()
|
||||
} else {
|
||||
vVal = vVar
|
||||
}
|
||||
isDeref := vVal.Type().Name() == "" && vVal.Type().Kind() == reflect.Ptr
|
||||
isNew := isDeref && vVal.IsNil()
|
||||
// vAddr is address of value to set (dereferenced & allocated as needed)
|
||||
var vAddr reflect.Value
|
||||
switch {
|
||||
case isNew:
|
||||
vAddr = reflect.New(vVal.Type().Elem())
|
||||
case isDeref && !isNew:
|
||||
vAddr = vVal
|
||||
default:
|
||||
vAddr = vVal.Addr()
|
||||
}
|
||||
vAddrI := vAddr.Interface()
|
||||
err, ok := error(nil), false
|
||||
for _, s := range setters {
|
||||
err = s(vAddrI, blank, value, t)
|
||||
if err == nil {
|
||||
ok = true
|
||||
break
|
||||
}
|
||||
if err != errUnsupportedType {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
// in case all setters returned errUnsupportedType
|
||||
return err
|
||||
}
|
||||
if isNew { // set reference if it was dereferenced and newly allocated
|
||||
vVal.Set(vAddr)
|
||||
}
|
||||
if isMulti { // append if multi-valued
|
||||
vVar.Set(reflect.Append(vVar, vVal))
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
; Comment line
|
||||
[section]
|
||||
name=value # comment
|
|
@ -0,0 +1,3 @@
|
|||
; Comment line
|
||||
[甲]
|
||||
乙=丙 # comment
|
|
@ -0,0 +1,435 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// TODO(gri) consider making this a separate package outside the go directory.
|
||||
|
||||
package token
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Positions
|
||||
|
||||
// Position describes an arbitrary source position
|
||||
// including the file, line, and column location.
|
||||
// A Position is valid if the line number is > 0.
|
||||
//
|
||||
type Position struct {
|
||||
Filename string // filename, if any
|
||||
Offset int // offset, starting at 0
|
||||
Line int // line number, starting at 1
|
||||
Column int // column number, starting at 1 (character count)
|
||||
}
|
||||
|
||||
// IsValid returns true if the position is valid.
|
||||
func (pos *Position) IsValid() bool { return pos.Line > 0 }
|
||||
|
||||
// String returns a string in one of several forms:
|
||||
//
|
||||
// file:line:column valid position with file name
|
||||
// line:column valid position without file name
|
||||
// file invalid position with file name
|
||||
// - invalid position without file name
|
||||
//
|
||||
func (pos Position) String() string {
|
||||
s := pos.Filename
|
||||
if pos.IsValid() {
|
||||
if s != "" {
|
||||
s += ":"
|
||||
}
|
||||
s += fmt.Sprintf("%d:%d", pos.Line, pos.Column)
|
||||
}
|
||||
if s == "" {
|
||||
s = "-"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Pos is a compact encoding of a source position within a file set.
|
||||
// It can be converted into a Position for a more convenient, but much
|
||||
// larger, representation.
|
||||
//
|
||||
// The Pos value for a given file is a number in the range [base, base+size],
|
||||
// where base and size are specified when adding the file to the file set via
|
||||
// AddFile.
|
||||
//
|
||||
// To create the Pos value for a specific source offset, first add
|
||||
// the respective file to the current file set (via FileSet.AddFile)
|
||||
// and then call File.Pos(offset) for that file. Given a Pos value p
|
||||
// for a specific file set fset, the corresponding Position value is
|
||||
// obtained by calling fset.Position(p).
|
||||
//
|
||||
// Pos values can be compared directly with the usual comparison operators:
|
||||
// If two Pos values p and q are in the same file, comparing p and q is
|
||||
// equivalent to comparing the respective source file offsets. If p and q
|
||||
// are in different files, p < q is true if the file implied by p was added
|
||||
// to the respective file set before the file implied by q.
|
||||
//
|
||||
type Pos int
|
||||
|
||||
// The zero value for Pos is NoPos; there is no file and line information
|
||||
// associated with it, and NoPos.IsValid() is false. NoPos is always
|
||||
// smaller than any other Pos value. The corresponding Position value
|
||||
// for NoPos is the zero value for Position.
|
||||
//
|
||||
const NoPos Pos = 0
|
||||
|
||||
// IsValid returns true if the position is valid.
|
||||
func (p Pos) IsValid() bool {
|
||||
return p != NoPos
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// File
|
||||
|
||||
// A File is a handle for a file belonging to a FileSet.
|
||||
// A File has a name, size, and line offset table.
|
||||
//
|
||||
type File struct {
|
||||
set *FileSet
|
||||
name string // file name as provided to AddFile
|
||||
base int // Pos value range for this file is [base...base+size]
|
||||
size int // file size as provided to AddFile
|
||||
|
||||
// lines and infos are protected by set.mutex
|
||||
lines []int
|
||||
infos []lineInfo
|
||||
}
|
||||
|
||||
// Name returns the file name of file f as registered with AddFile.
|
||||
func (f *File) Name() string {
|
||||
return f.name
|
||||
}
|
||||
|
||||
// Base returns the base offset of file f as registered with AddFile.
|
||||
func (f *File) Base() int {
|
||||
return f.base
|
||||
}
|
||||
|
||||
// Size returns the size of file f as registered with AddFile.
|
||||
func (f *File) Size() int {
|
||||
return f.size
|
||||
}
|
||||
|
||||
// LineCount returns the number of lines in file f.
|
||||
func (f *File) LineCount() int {
|
||||
f.set.mutex.RLock()
|
||||
n := len(f.lines)
|
||||
f.set.mutex.RUnlock()
|
||||
return n
|
||||
}
|
||||
|
||||
// AddLine adds the line offset for a new line.
|
||||
// The line offset must be larger than the offset for the previous line
|
||||
// and smaller than the file size; otherwise the line offset is ignored.
|
||||
//
|
||||
func (f *File) AddLine(offset int) {
|
||||
f.set.mutex.Lock()
|
||||
if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
|
||||
f.lines = append(f.lines, offset)
|
||||
}
|
||||
f.set.mutex.Unlock()
|
||||
}
|
||||
|
||||
// SetLines sets the line offsets for a file and returns true if successful.
|
||||
// The line offsets are the offsets of the first character of each line;
|
||||
// for instance for the content "ab\nc\n" the line offsets are {0, 3}.
|
||||
// An empty file has an empty line offset table.
|
||||
// Each line offset must be larger than the offset for the previous line
|
||||
// and smaller than the file size; otherwise SetLines fails and returns
|
||||
// false.
|
||||
//
|
||||
func (f *File) SetLines(lines []int) bool {
|
||||
// verify validity of lines table
|
||||
size := f.size
|
||||
for i, offset := range lines {
|
||||
if i > 0 && offset <= lines[i-1] || size <= offset {
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// set lines table
|
||||
f.set.mutex.Lock()
|
||||
f.lines = lines
|
||||
f.set.mutex.Unlock()
|
||||
return true
|
||||
}
|
||||
|
||||
// SetLinesForContent sets the line offsets for the given file content.
|
||||
func (f *File) SetLinesForContent(content []byte) {
|
||||
var lines []int
|
||||
line := 0
|
||||
for offset, b := range content {
|
||||
if line >= 0 {
|
||||
lines = append(lines, line)
|
||||
}
|
||||
line = -1
|
||||
if b == '\n' {
|
||||
line = offset + 1
|
||||
}
|
||||
}
|
||||
|
||||
// set lines table
|
||||
f.set.mutex.Lock()
|
||||
f.lines = lines
|
||||
f.set.mutex.Unlock()
|
||||
}
|
||||
|
||||
// A lineInfo object describes alternative file and line number
|
||||
// information (such as provided via a //line comment in a .go
|
||||
// file) for a given file offset.
|
||||
type lineInfo struct {
|
||||
// fields are exported to make them accessible to gob
|
||||
Offset int
|
||||
Filename string
|
||||
Line int
|
||||
}
|
||||
|
||||
// AddLineInfo adds alternative file and line number information for
|
||||
// a given file offset. The offset must be larger than the offset for
|
||||
// the previously added alternative line info and smaller than the
|
||||
// file size; otherwise the information is ignored.
|
||||
//
|
||||
// AddLineInfo is typically used to register alternative position
|
||||
// information for //line filename:line comments in source files.
|
||||
//
|
||||
func (f *File) AddLineInfo(offset int, filename string, line int) {
|
||||
f.set.mutex.Lock()
|
||||
if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
|
||||
f.infos = append(f.infos, lineInfo{offset, filename, line})
|
||||
}
|
||||
f.set.mutex.Unlock()
|
||||
}
|
||||
|
||||
// Pos returns the Pos value for the given file offset;
|
||||
// the offset must be <= f.Size().
|
||||
// f.Pos(f.Offset(p)) == p.
|
||||
//
|
||||
func (f *File) Pos(offset int) Pos {
|
||||
if offset > f.size {
|
||||
panic("illegal file offset")
|
||||
}
|
||||
return Pos(f.base + offset)
|
||||
}
|
||||
|
||||
// Offset returns the offset for the given file position p;
|
||||
// p must be a valid Pos value in that file.
|
||||
// f.Offset(f.Pos(offset)) == offset.
|
||||
//
|
||||
func (f *File) Offset(p Pos) int {
|
||||
if int(p) < f.base || int(p) > f.base+f.size {
|
||||
panic("illegal Pos value")
|
||||
}
|
||||
return int(p) - f.base
|
||||
}
|
||||
|
||||
// Line returns the line number for the given file position p;
|
||||
// p must be a Pos value in that file or NoPos.
|
||||
//
|
||||
func (f *File) Line(p Pos) int {
|
||||
// TODO(gri) this can be implemented much more efficiently
|
||||
return f.Position(p).Line
|
||||
}
|
||||
|
||||
func searchLineInfos(a []lineInfo, x int) int {
|
||||
return sort.Search(len(a), func(i int) bool { return a[i].Offset > x }) - 1
|
||||
}
|
||||
|
||||
// info returns the file name, line, and column number for a file offset.
|
||||
func (f *File) info(offset int) (filename string, line, column int) {
|
||||
filename = f.name
|
||||
if i := searchInts(f.lines, offset); i >= 0 {
|
||||
line, column = i+1, offset-f.lines[i]+1
|
||||
}
|
||||
if len(f.infos) > 0 {
|
||||
// almost no files have extra line infos
|
||||
if i := searchLineInfos(f.infos, offset); i >= 0 {
|
||||
alt := &f.infos[i]
|
||||
filename = alt.Filename
|
||||
if i := searchInts(f.lines, alt.Offset); i >= 0 {
|
||||
line += alt.Line - i - 1
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (f *File) position(p Pos) (pos Position) {
|
||||
offset := int(p) - f.base
|
||||
pos.Offset = offset
|
||||
pos.Filename, pos.Line, pos.Column = f.info(offset)
|
||||
return
|
||||
}
|
||||
|
||||
// Position returns the Position value for the given file position p;
|
||||
// p must be a Pos value in that file or NoPos.
|
||||
//
|
||||
func (f *File) Position(p Pos) (pos Position) {
|
||||
if p != NoPos {
|
||||
if int(p) < f.base || int(p) > f.base+f.size {
|
||||
panic("illegal Pos value")
|
||||
}
|
||||
pos = f.position(p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// FileSet
|
||||
|
||||
// A FileSet represents a set of source files.
|
||||
// Methods of file sets are synchronized; multiple goroutines
|
||||
// may invoke them concurrently.
|
||||
//
|
||||
type FileSet struct {
|
||||
mutex sync.RWMutex // protects the file set
|
||||
base int // base offset for the next file
|
||||
files []*File // list of files in the order added to the set
|
||||
last *File // cache of last file looked up
|
||||
}
|
||||
|
||||
// NewFileSet creates a new file set.
|
||||
func NewFileSet() *FileSet {
|
||||
s := new(FileSet)
|
||||
s.base = 1 // 0 == NoPos
|
||||
return s
|
||||
}
|
||||
|
||||
// Base returns the minimum base offset that must be provided to
|
||||
// AddFile when adding the next file.
|
||||
//
|
||||
func (s *FileSet) Base() int {
|
||||
s.mutex.RLock()
|
||||
b := s.base
|
||||
s.mutex.RUnlock()
|
||||
return b
|
||||
|
||||
}
|
||||
|
||||
// AddFile adds a new file with a given filename, base offset, and file size
|
||||
// to the file set s and returns the file. Multiple files may have the same
|
||||
// name. The base offset must not be smaller than the FileSet's Base(), and
|
||||
// size must not be negative.
|
||||
//
|
||||
// Adding the file will set the file set's Base() value to base + size + 1
|
||||
// as the minimum base value for the next file. The following relationship
|
||||
// exists between a Pos value p for a given file offset offs:
|
||||
//
|
||||
// int(p) = base + offs
|
||||
//
|
||||
// with offs in the range [0, size] and thus p in the range [base, base+size].
|
||||
// For convenience, File.Pos may be used to create file-specific position
|
||||
// values from a file offset.
|
||||
//
|
||||
func (s *FileSet) AddFile(filename string, base, size int) *File {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
if base < s.base || size < 0 {
|
||||
panic("illegal base or size")
|
||||
}
|
||||
// base >= s.base && size >= 0
|
||||
f := &File{s, filename, base, size, []int{0}, nil}
|
||||
base += size + 1 // +1 because EOF also has a position
|
||||
if base < 0 {
|
||||
panic("token.Pos offset overflow (> 2G of source code in file set)")
|
||||
}
|
||||
// add the file to the file set
|
||||
s.base = base
|
||||
s.files = append(s.files, f)
|
||||
s.last = f
|
||||
return f
|
||||
}
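A short usage sketch of the file-set API defined above (NewFileSet, AddFile, Pos, Position); the vendored import path is assumed from this commit.

package main

import (
	"fmt"

	"github.com/scalingdata/gcfg/token" // assumed vendored path of the package above
)

func main() {
	fset := token.NewFileSet()
	// Register a 100-byte file at the next free base offset.
	f := fset.AddFile("example.gcfg", fset.Base(), 100)
	f.AddLine(10) // offset of the first character of line 2
	p := f.Pos(12)                // Pos == base + offset
	fmt.Println(fset.Position(p)) // example.gcfg:2:3
}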
|
||||
|
||||
// Iterate calls f for the files in the file set in the order they were added
|
||||
// until f returns false.
|
||||
//
|
||||
func (s *FileSet) Iterate(f func(*File) bool) {
|
||||
for i := 0; ; i++ {
|
||||
var file *File
|
||||
s.mutex.RLock()
|
||||
if i < len(s.files) {
|
||||
file = s.files[i]
|
||||
}
|
||||
s.mutex.RUnlock()
|
||||
if file == nil || !f(file) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func searchFiles(a []*File, x int) int {
|
||||
return sort.Search(len(a), func(i int) bool { return a[i].base > x }) - 1
|
||||
}
|
||||
|
||||
func (s *FileSet) file(p Pos) *File {
|
||||
// common case: p is in last file
|
||||
if f := s.last; f != nil && f.base <= int(p) && int(p) <= f.base+f.size {
|
||||
return f
|
||||
}
|
||||
// p is not in last file - search all files
|
||||
if i := searchFiles(s.files, int(p)); i >= 0 {
|
||||
f := s.files[i]
|
||||
// f.base <= int(p) by definition of searchFiles
|
||||
if int(p) <= f.base+f.size {
|
||||
s.last = f
|
||||
return f
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// File returns the file that contains the position p.
|
||||
// If no such file is found (for instance for p == NoPos),
|
||||
// the result is nil.
|
||||
//
|
||||
func (s *FileSet) File(p Pos) (f *File) {
|
||||
if p != NoPos {
|
||||
s.mutex.RLock()
|
||||
f = s.file(p)
|
||||
s.mutex.RUnlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Position converts a Pos in the fileset into a general Position.
|
||||
func (s *FileSet) Position(p Pos) (pos Position) {
|
||||
if p != NoPos {
|
||||
s.mutex.RLock()
|
||||
if f := s.file(p); f != nil {
|
||||
pos = f.position(p)
|
||||
}
|
||||
s.mutex.RUnlock()
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
// Helper functions
|
||||
|
||||
func searchInts(a []int, x int) int {
|
||||
// This function body is a manually inlined version of:
|
||||
//
|
||||
// return sort.Search(len(a), func(i int) bool { return a[i] > x }) - 1
|
||||
//
|
||||
// With better compiler optimizations, this may not be needed in the
|
||||
// future, but at the moment this change improves the go/printer
|
||||
// benchmark performance by ~30%. This has a direct impact on the
|
||||
// speed of gofmt and thus seems worthwhile (2011-04-29).
|
||||
// TODO(gri): Remove this when compilers have caught up.
|
||||
i, j := 0, len(a)
|
||||
for i < j {
|
||||
h := i + (j-i)/2 // avoid overflow when computing h
|
||||
// i ≤ h < j
|
||||
if a[h] <= x {
|
||||
i = h + 1
|
||||
} else {
|
||||
j = h
|
||||
}
|
||||
}
|
||||
return i - 1
|
||||
}
|
|
@@ -0,0 +1,181 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package token
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func checkPos(t *testing.T, msg string, p, q Position) {
|
||||
if p.Filename != q.Filename {
|
||||
t.Errorf("%s: expected filename = %q; got %q", msg, q.Filename, p.Filename)
|
||||
}
|
||||
if p.Offset != q.Offset {
|
||||
t.Errorf("%s: expected offset = %d; got %d", msg, q.Offset, p.Offset)
|
||||
}
|
||||
if p.Line != q.Line {
|
||||
t.Errorf("%s: expected line = %d; got %d", msg, q.Line, p.Line)
|
||||
}
|
||||
if p.Column != q.Column {
|
||||
t.Errorf("%s: expected column = %d; got %d", msg, q.Column, p.Column)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoPos(t *testing.T) {
|
||||
if NoPos.IsValid() {
|
||||
t.Errorf("NoPos should not be valid")
|
||||
}
|
||||
var fset *FileSet
|
||||
checkPos(t, "nil NoPos", fset.Position(NoPos), Position{})
|
||||
fset = NewFileSet()
|
||||
checkPos(t, "fset NoPos", fset.Position(NoPos), Position{})
|
||||
}
|
||||
|
||||
var tests = []struct {
|
||||
filename string
|
||||
source []byte // may be nil
|
||||
size int
|
||||
lines []int
|
||||
}{
|
||||
{"a", []byte{}, 0, []int{}},
|
||||
{"b", []byte("01234"), 5, []int{0}},
|
||||
{"c", []byte("\n\n\n\n\n\n\n\n\n"), 9, []int{0, 1, 2, 3, 4, 5, 6, 7, 8}},
|
||||
{"d", nil, 100, []int{0, 5, 10, 20, 30, 70, 71, 72, 80, 85, 90, 99}},
|
||||
{"e", nil, 777, []int{0, 80, 100, 120, 130, 180, 267, 455, 500, 567, 620}},
|
||||
{"f", []byte("package p\n\nimport \"fmt\""), 23, []int{0, 10, 11}},
|
||||
{"g", []byte("package p\n\nimport \"fmt\"\n"), 24, []int{0, 10, 11}},
|
||||
{"h", []byte("package p\n\nimport \"fmt\"\n "), 25, []int{0, 10, 11, 24}},
|
||||
}
|
||||
|
||||
func linecol(lines []int, offs int) (int, int) {
|
||||
prevLineOffs := 0
|
||||
for line, lineOffs := range lines {
|
||||
if offs < lineOffs {
|
||||
return line, offs - prevLineOffs + 1
|
||||
}
|
||||
prevLineOffs = lineOffs
|
||||
}
|
||||
return len(lines), offs - prevLineOffs + 1
|
||||
}
|
||||
|
||||
func verifyPositions(t *testing.T, fset *FileSet, f *File, lines []int) {
|
||||
for offs := 0; offs < f.Size(); offs++ {
|
||||
p := f.Pos(offs)
|
||||
offs2 := f.Offset(p)
|
||||
if offs2 != offs {
|
||||
t.Errorf("%s, Offset: expected offset %d; got %d", f.Name(), offs, offs2)
|
||||
}
|
||||
line, col := linecol(lines, offs)
|
||||
msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
|
||||
checkPos(t, msg, f.Position(f.Pos(offs)), Position{f.Name(), offs, line, col})
|
||||
checkPos(t, msg, fset.Position(p), Position{f.Name(), offs, line, col})
|
||||
}
|
||||
}
|
||||
|
||||
func makeTestSource(size int, lines []int) []byte {
|
||||
src := make([]byte, size)
|
||||
for _, offs := range lines {
|
||||
if offs > 0 {
|
||||
src[offs-1] = '\n'
|
||||
}
|
||||
}
|
||||
return src
|
||||
}
|
||||
|
||||
func TestPositions(t *testing.T) {
|
||||
const delta = 7 // a non-zero base offset increment
|
||||
fset := NewFileSet()
|
||||
for _, test := range tests {
|
||||
// verify consistency of test case
|
||||
if test.source != nil && len(test.source) != test.size {
|
||||
t.Errorf("%s: inconsistent test case: expected file size %d; got %d", test.filename, test.size, len(test.source))
|
||||
}
|
||||
|
||||
// add file and verify name and size
|
||||
f := fset.AddFile(test.filename, fset.Base()+delta, test.size)
|
||||
if f.Name() != test.filename {
|
||||
t.Errorf("expected filename %q; got %q", test.filename, f.Name())
|
||||
}
|
||||
if f.Size() != test.size {
|
||||
t.Errorf("%s: expected file size %d; got %d", f.Name(), test.size, f.Size())
|
||||
}
|
||||
if fset.File(f.Pos(0)) != f {
|
||||
t.Errorf("%s: f.Pos(0) was not found in f", f.Name())
|
||||
}
|
||||
|
||||
// add lines individually and verify all positions
|
||||
for i, offset := range test.lines {
|
||||
f.AddLine(offset)
|
||||
if f.LineCount() != i+1 {
|
||||
t.Errorf("%s, AddLine: expected line count %d; got %d", f.Name(), i+1, f.LineCount())
|
||||
}
|
||||
// adding the same offset again should be ignored
|
||||
f.AddLine(offset)
|
||||
if f.LineCount() != i+1 {
|
||||
t.Errorf("%s, AddLine: expected unchanged line count %d; got %d", f.Name(), i+1, f.LineCount())
|
||||
}
|
||||
verifyPositions(t, fset, f, test.lines[0:i+1])
|
||||
}
|
||||
|
||||
// add lines with SetLines and verify all positions
|
||||
if ok := f.SetLines(test.lines); !ok {
|
||||
t.Errorf("%s: SetLines failed", f.Name())
|
||||
}
|
||||
if f.LineCount() != len(test.lines) {
|
||||
t.Errorf("%s, SetLines: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
|
||||
}
|
||||
verifyPositions(t, fset, f, test.lines)
|
||||
|
||||
// add lines with SetLinesForContent and verify all positions
|
||||
src := test.source
|
||||
if src == nil {
|
||||
// no test source available - create one from scratch
|
||||
src = makeTestSource(test.size, test.lines)
|
||||
}
|
||||
f.SetLinesForContent(src)
|
||||
if f.LineCount() != len(test.lines) {
|
||||
t.Errorf("%s, SetLinesForContent: expected line count %d; got %d", f.Name(), len(test.lines), f.LineCount())
|
||||
}
|
||||
verifyPositions(t, fset, f, test.lines)
|
||||
}
|
||||
}
|
||||
|
||||
func TestLineInfo(t *testing.T) {
|
||||
fset := NewFileSet()
|
||||
f := fset.AddFile("foo", fset.Base(), 500)
|
||||
lines := []int{0, 42, 77, 100, 210, 220, 277, 300, 333, 401}
|
||||
// add lines individually and provide alternative line information
|
||||
for _, offs := range lines {
|
||||
f.AddLine(offs)
|
||||
f.AddLineInfo(offs, "bar", 42)
|
||||
}
|
||||
// verify positions for all offsets
|
||||
for offs := 0; offs <= f.Size(); offs++ {
|
||||
p := f.Pos(offs)
|
||||
_, col := linecol(lines, offs)
|
||||
msg := fmt.Sprintf("%s (offs = %d, p = %d)", f.Name(), offs, p)
|
||||
checkPos(t, msg, f.Position(f.Pos(offs)), Position{"bar", offs, 42, col})
|
||||
checkPos(t, msg, fset.Position(p), Position{"bar", offs, 42, col})
|
||||
}
|
||||
}
|
||||
|
||||
func TestFiles(t *testing.T) {
|
||||
fset := NewFileSet()
|
||||
for i, test := range tests {
|
||||
fset.AddFile(test.filename, fset.Base(), test.size)
|
||||
j := 0
|
||||
fset.Iterate(func(f *File) bool {
|
||||
if f.Name() != tests[j].filename {
|
||||
t.Errorf("expected filename = %s; got %s", tests[j].filename, f.Name())
|
||||
}
|
||||
j++
|
||||
return true
|
||||
})
|
||||
if j != i+1 {
|
||||
t.Errorf("expected %d files; got %d", i+1, j)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,56 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package token
|
||||
|
||||
type serializedFile struct {
|
||||
// fields correspond 1:1 to fields with same (lower-case) name in File
|
||||
Name string
|
||||
Base int
|
||||
Size int
|
||||
Lines []int
|
||||
Infos []lineInfo
|
||||
}
|
||||
|
||||
type serializedFileSet struct {
|
||||
Base int
|
||||
Files []serializedFile
|
||||
}
|
||||
|
||||
// Read calls decode to deserialize a file set into s; s must not be nil.
|
||||
func (s *FileSet) Read(decode func(interface{}) error) error {
|
||||
var ss serializedFileSet
|
||||
if err := decode(&ss); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.mutex.Lock()
|
||||
s.base = ss.Base
|
||||
files := make([]*File, len(ss.Files))
|
||||
for i := 0; i < len(ss.Files); i++ {
|
||||
f := &ss.Files[i]
|
||||
files[i] = &File{s, f.Name, f.Base, f.Size, f.Lines, f.Infos}
|
||||
}
|
||||
s.files = files
|
||||
s.last = nil
|
||||
s.mutex.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write calls encode to serialize the file set s.
|
||||
func (s *FileSet) Write(encode func(interface{}) error) error {
|
||||
var ss serializedFileSet
|
||||
|
||||
s.mutex.Lock()
|
||||
ss.Base = s.base
|
||||
files := make([]serializedFile, len(s.files))
|
||||
for i, f := range s.files {
|
||||
files[i] = serializedFile{f.name, f.base, f.size, f.lines, f.infos}
|
||||
}
|
||||
ss.Files = files
|
||||
s.mutex.Unlock()
|
||||
|
||||
return encode(ss)
|
||||
}
|
|
@@ -0,0 +1,111 @@
|
|||
// Copyright 2011 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package token
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"testing"
|
||||
)
|
||||
|
||||
// equal returns nil if p and q describe the same file set;
|
||||
// otherwise it returns an error describing the discrepancy.
|
||||
func equal(p, q *FileSet) error {
|
||||
if p == q {
|
||||
// avoid deadlock if p == q
|
||||
return nil
|
||||
}
|
||||
|
||||
// not strictly needed for the test
|
||||
p.mutex.Lock()
|
||||
q.mutex.Lock()
|
||||
defer q.mutex.Unlock()
|
||||
defer p.mutex.Unlock()
|
||||
|
||||
if p.base != q.base {
|
||||
return fmt.Errorf("different bases: %d != %d", p.base, q.base)
|
||||
}
|
||||
|
||||
if len(p.files) != len(q.files) {
|
||||
return fmt.Errorf("different number of files: %d != %d", len(p.files), len(q.files))
|
||||
}
|
||||
|
||||
for i, f := range p.files {
|
||||
g := q.files[i]
|
||||
if f.set != p {
|
||||
return fmt.Errorf("wrong fileset for %q", f.name)
|
||||
}
|
||||
if g.set != q {
|
||||
return fmt.Errorf("wrong fileset for %q", g.name)
|
||||
}
|
||||
if f.name != g.name {
|
||||
return fmt.Errorf("different filenames: %q != %q", f.name, g.name)
|
||||
}
|
||||
if f.base != g.base {
|
||||
return fmt.Errorf("different base for %q: %d != %d", f.name, f.base, g.base)
|
||||
}
|
||||
if f.size != g.size {
|
||||
return fmt.Errorf("different size for %q: %d != %d", f.name, f.size, g.size)
|
||||
}
|
||||
for j, l := range f.lines {
|
||||
m := g.lines[j]
|
||||
if l != m {
|
||||
return fmt.Errorf("different offsets for %q", f.name)
|
||||
}
|
||||
}
|
||||
for j, l := range f.infos {
|
||||
m := g.infos[j]
|
||||
if l.Offset != m.Offset || l.Filename != m.Filename || l.Line != m.Line {
|
||||
return fmt.Errorf("different infos for %q", f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// we don't care about .last - it's just a cache
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkSerialize(t *testing.T, p *FileSet) {
|
||||
var buf bytes.Buffer
|
||||
encode := func(x interface{}) error {
|
||||
return gob.NewEncoder(&buf).Encode(x)
|
||||
}
|
||||
if err := p.Write(encode); err != nil {
|
||||
t.Errorf("writing fileset failed: %s", err)
|
||||
return
|
||||
}
|
||||
q := NewFileSet()
|
||||
decode := func(x interface{}) error {
|
||||
return gob.NewDecoder(&buf).Decode(x)
|
||||
}
|
||||
if err := q.Read(decode); err != nil {
|
||||
t.Errorf("reading fileset failed: %s", err)
|
||||
return
|
||||
}
|
||||
if err := equal(p, q); err != nil {
|
||||
t.Errorf("filesets not identical: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSerialization(t *testing.T) {
|
||||
p := NewFileSet()
|
||||
checkSerialize(t, p)
|
||||
// add some files
|
||||
for i := 0; i < 10; i++ {
|
||||
f := p.AddFile(fmt.Sprintf("file%d", i), p.Base()+i, i*100)
|
||||
checkSerialize(t, p)
|
||||
// add some lines and alternative file infos
|
||||
line := 1000
|
||||
for offs := 0; offs < f.Size(); offs += 40 + i {
|
||||
f.AddLine(offs)
|
||||
if offs%7 == 0 {
|
||||
f.AddLineInfo(offs, fmt.Sprintf("file%d", offs), line)
|
||||
line += 33
|
||||
}
|
||||
}
|
||||
checkSerialize(t, p)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,83 @@
|
|||
// Copyright 2009 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package token defines constants representing the lexical tokens of the gcfg
|
||||
// configuration syntax and basic operations on tokens (printing, predicates).
|
||||
//
|
||||
// Note that the API for the token package may change to accommodate new
|
||||
// features or implementation changes in gcfg.
|
||||
//
|
||||
package token
|
||||
|
||||
import "strconv"
|
||||
|
||||
// Token is the set of lexical tokens of the gcfg configuration syntax.
|
||||
type Token int
|
||||
|
||||
// The list of tokens.
|
||||
const (
|
||||
// Special tokens
|
||||
ILLEGAL Token = iota
|
||||
EOF
|
||||
COMMENT
|
||||
|
||||
literal_beg
|
||||
// Identifiers and basic type literals
|
||||
// (these tokens stand for classes of literals)
|
||||
IDENT // section-name, variable-name
|
||||
STRING // "subsection-name", variable value
|
||||
literal_end
|
||||
|
||||
operator_beg
|
||||
// Operators and delimiters
|
||||
ASSIGN // =
|
||||
LBRACK // [
|
||||
RBRACK // ]
|
||||
EOL // \n
|
||||
operator_end
|
||||
)
|
||||
|
||||
var tokens = [...]string{
|
||||
ILLEGAL: "ILLEGAL",
|
||||
|
||||
EOF: "EOF",
|
||||
COMMENT: "COMMENT",
|
||||
|
||||
IDENT: "IDENT",
|
||||
STRING: "STRING",
|
||||
|
||||
ASSIGN: "=",
|
||||
LBRACK: "[",
|
||||
RBRACK: "]",
|
||||
EOL: "\n",
|
||||
}
|
||||
|
||||
// String returns the string corresponding to the token tok.
|
||||
// For operators and delimiters, the string is the actual token character
|
||||
// sequence (e.g., for the token ASSIGN, the string is "="). For all other
|
||||
// tokens the string corresponds to the token constant name (e.g. for the
|
||||
// token IDENT, the string is "IDENT").
|
||||
//
|
||||
func (tok Token) String() string {
|
||||
s := ""
|
||||
if 0 <= tok && tok < Token(len(tokens)) {
|
||||
s = tokens[tok]
|
||||
}
|
||||
if s == "" {
|
||||
s = "token(" + strconv.Itoa(int(tok)) + ")"
|
||||
}
|
||||
return s
|
||||
}
|
||||
|
||||
// Predicates
|
||||
|
||||
// IsLiteral returns true for tokens corresponding to identifiers
|
||||
// and basic type literals; it returns false otherwise.
|
||||
//
|
||||
func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
|
||||
|
||||
// IsOperator returns true for tokens corresponding to operators and
|
||||
// delimiters; it returns false otherwise.
|
||||
//
|
||||
func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
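The String and predicate methods above behave as in this small sketch (vendored import path assumed):

package main

import (
	"fmt"

	"github.com/scalingdata/gcfg/token" // assumed vendored path
)

func main() {
	fmt.Println(token.ASSIGN)            // "=" (operators print their character sequence)
	fmt.Println(token.EOF)               // "EOF" (other tokens print their constant name)
	fmt.Println(token.IDENT.IsLiteral()) // true
	fmt.Println(token.EOL.IsOperator())  // true
}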
|
|
@@ -0,0 +1,23 @@
|
|||
package types
|
||||
|
||||
// BoolValues defines the name and value mappings for ParseBool.
|
||||
var BoolValues = map[string]interface{}{
|
||||
"true": true, "yes": true, "on": true, "1": true,
|
||||
"false": false, "no": false, "off": false, "0": false,
|
||||
}
|
||||
|
||||
var boolParser = func() *EnumParser {
|
||||
ep := &EnumParser{}
|
||||
ep.AddVals(BoolValues)
|
||||
return ep
|
||||
}()
|
||||
|
||||
// ParseBool parses bool values according to the definitions in BoolValues.
|
||||
// Parsing is case-insensitive.
|
||||
func ParseBool(s string) (bool, error) {
|
||||
v, err := boolParser.Parse(s)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
return v.(bool), nil
|
||||
}
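A quick sketch of ParseBool in use (vendored import path assumed):

package main

import (
	"fmt"

	"github.com/scalingdata/gcfg/types" // assumed vendored path
)

func main() {
	b, err := types.ParseBool("YES") // case-insensitive; "yes" maps to true in BoolValues
	fmt.Println(b, err)              // true <nil>
}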
|
|
@@ -0,0 +1,4 @@
|
|||
// Package types defines helpers for type conversions.
|
||||
//
|
||||
// The API for this package is not finalized yet.
|
||||
package types
|
|
@@ -0,0 +1,44 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// EnumParser parses "enum" values, i.e. it maps a predefined set of strings to
|
||||
// predefined values.
|
||||
type EnumParser struct {
|
||||
Type string // type name; if not set, use type of first value added
|
||||
CaseMatch bool // if true, matching of strings is case-sensitive
|
||||
// PrefixMatch bool
|
||||
vals map[string]interface{}
|
||||
}
|
||||
|
||||
// AddVals adds strings and values to an EnumParser.
|
||||
func (ep *EnumParser) AddVals(vals map[string]interface{}) {
|
||||
if ep.vals == nil {
|
||||
ep.vals = make(map[string]interface{})
|
||||
}
|
||||
for k, v := range vals {
|
||||
if ep.Type == "" {
|
||||
ep.Type = reflect.TypeOf(v).Name()
|
||||
}
|
||||
if !ep.CaseMatch {
|
||||
k = strings.ToLower(k)
|
||||
}
|
||||
ep.vals[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
// Parse parses the string and returns the value or an error.
|
||||
func (ep EnumParser) Parse(s string) (interface{}, error) {
|
||||
if !ep.CaseMatch {
|
||||
s = strings.ToLower(s)
|
||||
}
|
||||
v, ok := ep.vals[s]
|
||||
if !ok {
|
||||
return false, fmt.Errorf("failed to parse %s %#q", ep.Type, s)
|
||||
}
|
||||
return v, nil
|
||||
}
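A sketch of a custom EnumParser built with AddVals and Parse as defined above (vendored import path assumed):

package main

import (
	"fmt"

	"github.com/scalingdata/gcfg/types" // assumed vendored path
)

func main() {
	ep := &types.EnumParser{Type: "color"}
	ep.AddVals(map[string]interface{}{"red": 1, "green": 2})
	v, err := ep.Parse("Green") // lower-cased before lookup since CaseMatch is false
	fmt.Println(v, err)         // 2 <nil>
}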
|
|
@@ -0,0 +1,29 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestEnumParserBool(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
val string
|
||||
res bool
|
||||
ok bool
|
||||
}{
|
||||
{val: "tRuE", res: true, ok: true},
|
||||
{val: "False", res: false, ok: true},
|
||||
{val: "t", ok: false},
|
||||
} {
|
||||
b, err := ParseBool(tt.val)
|
||||
switch {
|
||||
case tt.ok && err != nil:
|
||||
t.Errorf("%q: got error %v, want %v", tt.val, err, tt.res)
|
||||
case !tt.ok && err == nil:
|
||||
t.Errorf("%q: got %v, want error", tt.val, b)
|
||||
case tt.ok && b != tt.res:
|
||||
t.Errorf("%q: got %v, want %v", tt.val, b, tt.res)
|
||||
default:
|
||||
t.Logf("%q: got %v, %v", tt.val, b, err)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,86 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// An IntMode is a mode for parsing integer values, representing a set of
|
||||
// accepted bases.
|
||||
type IntMode uint8
|
||||
|
||||
// IntMode values for ParseInt; can be combined using bitwise OR.
|
||||
const (
|
||||
Dec IntMode = 1 << iota
|
||||
Hex
|
||||
Oct
|
||||
)
|
||||
|
||||
// String returns a string representation of IntMode; e.g. `IntMode(Dec|Hex)`.
|
||||
func (m IntMode) String() string {
|
||||
var modes []string
|
||||
if m&Dec != 0 {
|
||||
modes = append(modes, "Dec")
|
||||
}
|
||||
if m&Hex != 0 {
|
||||
modes = append(modes, "Hex")
|
||||
}
|
||||
if m&Oct != 0 {
|
||||
modes = append(modes, "Oct")
|
||||
}
|
||||
return "IntMode(" + strings.Join(modes, "|") + ")"
|
||||
}
|
||||
|
||||
var errIntAmbig = fmt.Errorf("ambiguous integer value; must include '0' prefix")
|
||||
|
||||
func prefix0(val string) bool {
|
||||
return strings.HasPrefix(val, "0") || strings.HasPrefix(val, "-0")
|
||||
}
|
||||
|
||||
func prefix0x(val string) bool {
|
||||
return strings.HasPrefix(val, "0x") || strings.HasPrefix(val, "-0x")
|
||||
}
|
||||
|
||||
// ParseInt parses val using mode into intptr, which must be a pointer to an
|
||||
// integer kind type. Non-decimal values require prefix `0` or `0x` in the cases
|
||||
// when mode permits ambiguity of base; otherwise the prefix can be omitted.
|
||||
func ParseInt(intptr interface{}, val string, mode IntMode) error {
|
||||
val = strings.TrimSpace(val)
|
||||
verb := byte(0)
|
||||
switch mode {
|
||||
case Dec:
|
||||
verb = 'd'
|
||||
case Dec + Hex:
|
||||
if prefix0x(val) {
|
||||
verb = 'v'
|
||||
} else {
|
||||
verb = 'd'
|
||||
}
|
||||
case Dec + Oct:
|
||||
if prefix0(val) && !prefix0x(val) {
|
||||
verb = 'v'
|
||||
} else {
|
||||
verb = 'd'
|
||||
}
|
||||
case Dec + Hex + Oct:
|
||||
verb = 'v'
|
||||
case Hex:
|
||||
if prefix0x(val) {
|
||||
verb = 'v'
|
||||
} else {
|
||||
verb = 'x'
|
||||
}
|
||||
case Oct:
|
||||
verb = 'o'
|
||||
case Hex + Oct:
|
||||
if prefix0(val) {
|
||||
verb = 'v'
|
||||
} else {
|
||||
return errIntAmbig
|
||||
}
|
||||
}
|
||||
if verb == 0 {
|
||||
panic("unsupported mode")
|
||||
}
|
||||
return ScanFully(intptr, val, verb)
|
||||
}
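For example, with the Dec|Oct mode a leading zero selects octal, matching the test table below (vendored import path assumed):

package main

import (
	"fmt"

	"github.com/scalingdata/gcfg/types" // assumed vendored path
)

func main() {
	var n int
	// "010" carries the 0 prefix, so in Dec|Oct mode it is scanned with the %v verb and read as octal.
	if err := types.ParseInt(&n, "010", types.Dec|types.Oct); err != nil {
		panic(err)
	}
	fmt.Println(n) // 8
}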
|
|
@@ -0,0 +1,67 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func elem(p interface{}) interface{} {
|
||||
return reflect.ValueOf(p).Elem().Interface()
|
||||
}
|
||||
|
||||
func TestParseInt(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
val string
|
||||
mode IntMode
|
||||
exp interface{}
|
||||
ok bool
|
||||
}{
|
||||
{"0", Dec, int(0), true},
|
||||
{"10", Dec, int(10), true},
|
||||
{"-10", Dec, int(-10), true},
|
||||
{"x", Dec, int(0), false},
|
||||
{"0xa", Hex, int(0xa), true},
|
||||
{"a", Hex, int(0xa), true},
|
||||
{"10", Hex, int(0x10), true},
|
||||
{"-0xa", Hex, int(-0xa), true},
|
||||
{"0x", Hex, int(0x0), true}, // Scanf doesn't require digit behind 0x
|
||||
{"-0x", Hex, int(0x0), true}, // Scanf doesn't require digit behind 0x
|
||||
{"-a", Hex, int(-0xa), true},
|
||||
{"-10", Hex, int(-0x10), true},
|
||||
{"x", Hex, int(0), false},
|
||||
{"10", Oct, int(010), true},
|
||||
{"010", Oct, int(010), true},
|
||||
{"-10", Oct, int(-010), true},
|
||||
{"-010", Oct, int(-010), true},
|
||||
{"10", Dec | Hex, int(10), true},
|
||||
{"010", Dec | Hex, int(10), true},
|
||||
{"0x10", Dec | Hex, int(0x10), true},
|
||||
{"10", Dec | Oct, int(10), true},
|
||||
{"010", Dec | Oct, int(010), true},
|
||||
{"0x10", Dec | Oct, int(0), false},
|
||||
{"10", Hex | Oct, int(0), false}, // need prefix to distinguish Hex/Oct
|
||||
{"010", Hex | Oct, int(010), true},
|
||||
{"0x10", Hex | Oct, int(0x10), true},
|
||||
{"10", Dec | Hex | Oct, int(10), true},
|
||||
{"010", Dec | Hex | Oct, int(010), true},
|
||||
{"0x10", Dec | Hex | Oct, int(0x10), true},
|
||||
} {
|
||||
typ := reflect.TypeOf(tt.exp)
|
||||
res := reflect.New(typ).Interface()
|
||||
err := ParseInt(res, tt.val, tt.mode)
|
||||
switch {
|
||||
case tt.ok && err != nil:
|
||||
t.Errorf("ParseInt(%v, %#v, %v): fail; got error %v, want ok",
|
||||
typ, tt.val, tt.mode, err)
|
||||
case !tt.ok && err == nil:
|
||||
t.Errorf("ParseInt(%v, %#v, %v): fail; got %v, want error",
|
||||
typ, tt.val, tt.mode, elem(res))
|
||||
case tt.ok && !reflect.DeepEqual(elem(res), tt.exp):
|
||||
t.Errorf("ParseInt(%v, %#v, %v): fail; got %v, want %v",
|
||||
typ, tt.val, tt.mode, elem(res), tt.exp)
|
||||
default:
|
||||
t.Logf("ParseInt(%v, %#v, %s): pass; got %v, error %v",
|
||||
typ, tt.val, tt.mode, elem(res), err)
|
||||
}
|
||||
}
|
||||
}
|
|
@@ -0,0 +1,23 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
)
|
||||
|
||||
// ScanFully uses fmt.Sscanf with verb to fully scan val into ptr.
|
||||
func ScanFully(ptr interface{}, val string, verb byte) error {
|
||||
t := reflect.ValueOf(ptr).Elem().Type()
|
||||
// attempt to read extra bytes to make sure the value is consumed
|
||||
var b []byte
|
||||
n, err := fmt.Sscanf(val, "%"+string(verb)+"%s", ptr, &b)
|
||||
switch {
|
||||
case n < 1 || n == 1 && err != io.EOF:
|
||||
return fmt.Errorf("failed to parse %q as %v: %v", val, t, err)
|
||||
case n > 1:
|
||||
return fmt.Errorf("failed to parse %q as %v: extra characters %q", val, t, string(b))
|
||||
}
|
||||
// n == 1 && err == io.EOF
|
||||
return nil
|
||||
}
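ScanFully in isolation, mirroring the test cases below (vendored import path assumed):

package main

import (
	"fmt"

	"github.com/scalingdata/gcfg/types" // assumed vendored path
)

func main() {
	var n int
	err := types.ScanFully(&n, "0x10", 'v') // %v accepts base prefixes; nothing may be left over
	fmt.Println(n, err)                     // 16 <nil>

	err = types.ScanFully(&n, "12ab", 'd') // trailing "ab" is rejected as extra characters
	fmt.Println(err != nil)                // true
}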
|
|
@@ -0,0 +1,36 @@
|
|||
package types
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestScanFully(t *testing.T) {
|
||||
for _, tt := range []struct {
|
||||
val string
|
||||
verb byte
|
||||
res interface{}
|
||||
ok bool
|
||||
}{
|
||||
{"a", 'v', int(0), false},
|
||||
{"0x", 'v', int(0), true},
|
||||
{"0x", 'd', int(0), false},
|
||||
} {
|
||||
d := reflect.New(reflect.TypeOf(tt.res)).Interface()
|
||||
err := ScanFully(d, tt.val, tt.verb)
|
||||
switch {
|
||||
case tt.ok && err != nil:
|
||||
t.Errorf("ScanFully(%T, %q, '%c'): want ok, got error %v",
|
||||
d, tt.val, tt.verb, err)
|
||||
case !tt.ok && err == nil:
|
||||
t.Errorf("ScanFully(%T, %q, '%c'): want error, got %v",
|
||||
d, tt.val, tt.verb, elem(d))
|
||||
case tt.ok && err == nil && !reflect.DeepEqual(tt.res, elem(d)):
|
||||
t.Errorf("ScanFully(%T, %q, '%c'): want %v, got %v",
|
||||
d, tt.val, tt.verb, tt.res, elem(d))
|
||||
default:
|
||||
t.Logf("ScanFully(%T, %q, '%c') = %v; *ptr==%v",
|
||||
d, tt.val, tt.verb, err, elem(d))
|
||||
}
|
||||
}
|
||||
}
|