vendor: update dependencies

Cadey Ratio 2017-04-28 16:28:25 -07:00
parent cc602bc7c5
commit e06286b5cd
63 changed files with 9868 additions and 0 deletions

@@ -106,3 +106,15 @@ ff09b135c25aae272398c51a07235b90a75aa4f0 github.com/pkg/errors
62f833fc9f6c4d3223bdb37bd0c2f8951bed8596 github.com/google/gops/internal
62f833fc9f6c4d3223bdb37bd0c2f8951bed8596 github.com/google/gops/signal
c2c54e542fb797ad986b31721e1baedf214ca413 github.com/kardianos/osext
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/codec
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/codec/json
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/index
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/internal
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/q
e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd github.com/boltdb/bolt
9474f19b515f52326c7d197d2d097caa7fc7485e github.com/caarlos0/env
f6c17b524822278a87e3b3bd809fec33b51f5b46 github.com/emirpasic/gods/containers
f6c17b524822278a87e3b3bd809fec33b51f5b46 github.com/emirpasic/gods/trees
f6c17b524822278a87e3b3bd809fec33b51f5b46 github.com/emirpasic/gods/trees/redblacktree
f6c17b524822278a87e3b3bd809fec33b51f5b46 github.com/emirpasic/gods/utils

vendor/github.com/asdine/storm/bucket.go generated vendored Normal file
@@ -0,0 +1,47 @@
package storm
import "github.com/boltdb/bolt"
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.
func (n *node) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
var b *bolt.Bucket
var err error
bucketNames := append(n.rootBucket, bucket)
for _, bucketName := range bucketNames {
if b != nil {
if b, err = b.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
return nil, err
}
} else {
if b, err = tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
return nil, err
}
}
}
return b, nil
}
// GetBucket returns the given bucket below the current node.
func (n *node) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
var b *bolt.Bucket
bucketNames := append(n.rootBucket, children...)
for _, bucketName := range bucketNames {
if b != nil {
if b = b.Bucket([]byte(bucketName)); b == nil {
return nil
}
} else {
if b = tx.Bucket([]byte(bucketName)); b == nil {
return nil
}
}
}
return b
}

vendor/github.com/asdine/storm/codec/codec.go generated vendored Normal file
@@ -0,0 +1,11 @@
// Package codec contains sub-packages with different codecs that can be used
// to encode and decode entities in Storm.
package codec
// MarshalUnmarshaler represents a codec used to marshal and unmarshal entities.
type MarshalUnmarshaler interface {
Marshal(v interface{}) ([]byte, error)
Unmarshal(b []byte, v interface{}) error
// Name returns the name of this codec.
Name() string
}
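Any type with these three methods can serve as a codec. As a hedged illustration (not part of this commit), a gob-based implementation that satisfies MarshalUnmarshaler, mirroring the shape of the json codec below:

package gob

import (
	"bytes"
	"encoding/gob"
)

const name = "gob"

// Codec that encodes to and decodes from gob. Illustrative sketch only.
var Codec = new(gobCodec)

type gobCodec int

func (g gobCodec) Marshal(v interface{}) ([]byte, error) {
	var buf bytes.Buffer
	err := gob.NewEncoder(&buf).Encode(v)
	return buf.Bytes(), err
}

func (g gobCodec) Unmarshal(b []byte, v interface{}) error {
	return gob.NewDecoder(bytes.NewReader(b)).Decode(v)
}

func (g gobCodec) Name() string {
	return name
}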

vendor/github.com/asdine/storm/codec/json/json.go generated vendored Normal file
@@ -0,0 +1,25 @@
// Package json contains a codec to encode and decode entities in JSON format
package json
import (
"encoding/json"
)
const name = "json"
// Codec that encodes to and decodes from JSON.
var Codec = new(jsonCodec)
type jsonCodec int
func (j jsonCodec) Marshal(v interface{}) ([]byte, error) {
return json.Marshal(v)
}
func (j jsonCodec) Unmarshal(b []byte, v interface{}) error {
return json.Unmarshal(b, v)
}
func (j jsonCodec) Name() string {
return name
}
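Used on its own, the codec simply round-trips any JSON-serializable value; a minimal check:

package main

import (
	"fmt"

	"github.com/asdine/storm/codec/json"
)

func main() {
	raw, err := json.Codec.Marshal(map[string]int{"answer": 42})
	if err != nil {
		panic(err)
	}
	var out map[string]int
	if err := json.Codec.Unmarshal(raw, &out); err != nil {
		panic(err)
	}
	fmt.Println(json.Codec.Name(), out["answer"]) // json 42
}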

vendor/github.com/asdine/storm/errors.go generated vendored Normal file
@@ -0,0 +1,54 @@
package storm
import "errors"
// Errors
var (
// ErrNoID is returned when no ID field or id tag is found in the struct.
ErrNoID = errors.New("missing struct tag id or ID field")
// ErrZeroID is returned when the ID field is a zero value.
ErrZeroID = errors.New("id field must not be a zero value")
// ErrBadType is returned when a method receives an unexpected value type.
ErrBadType = errors.New("provided data must be a struct or a pointer to struct")
// ErrAlreadyExists is returned when trying to set an existing value on a field that has a unique index.
ErrAlreadyExists = errors.New("already exists")
// ErrNilParam is returned when the specified param is expected to be not nil.
ErrNilParam = errors.New("param must not be nil")
// ErrUnknownTag is returned when an unexpected tag is specified.
ErrUnknownTag = errors.New("unknown tag")
// ErrIdxNotFound is returned when the specified index is not found.
ErrIdxNotFound = errors.New("index not found")
// ErrSlicePtrNeeded is returned when an unexpected value is given, instead of a pointer to slice.
ErrSlicePtrNeeded = errors.New("provided target must be a pointer to slice")
// ErrStructPtrNeeded is returned when an unexpected value is given, instead of a pointer to struct.
ErrStructPtrNeeded = errors.New("provided target must be a pointer to struct")
// ErrPtrNeeded is returned when an unexpected value is given, instead of a pointer.
ErrPtrNeeded = errors.New("provided target must be a pointer to a valid variable")
// ErrNoName is returned when the specified struct has no name.
ErrNoName = errors.New("provided target must have a name")
// ErrNotFound is returned when the specified record is not saved in the bucket.
ErrNotFound = errors.New("not found")
// ErrNotInTransaction is returned when trying to rollback or commit when not in transaction.
ErrNotInTransaction = errors.New("not in transaction")
// ErrUnAddressable is returned when a struct or an exported field of a struct is unaddressable
ErrUnAddressable = errors.New("unaddressable value")
// ErrIncompatibleValue is returned when trying to set a value with a different type than the chosen field
ErrIncompatibleValue = errors.New("incompatible value")
// ErrDifferentCodec is returned when using a codec different than the first codec used with the bucket.
ErrDifferentCodec = errors.New("the selected codec is incompatible with this bucket")
)
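These are sentinel values, so callers branch with equality checks rather than string matching. A minimal sketch; the err value is assumed to come from a Finder call such as One:

package main

import (
	"fmt"

	"github.com/asdine/storm"
)

// classify shows the expected way to branch on Storm's sentinel errors.
func classify(err error) string {
	switch err {
	case nil:
		return "found"
	case storm.ErrNotFound:
		return "missing" // usually non-fatal: the record is simply not in the bucket
	default:
		return "failed: " + err.Error()
	}
}

func main() {
	fmt.Println(classify(storm.ErrNotFound)) // missing
}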

vendor/github.com/asdine/storm/extract.go generated vendored Normal file
@@ -0,0 +1,223 @@
package storm
import (
"fmt"
"reflect"
"strconv"
"strings"
"github.com/asdine/storm/index"
"github.com/boltdb/bolt"
)
// Storm tags
const (
tagID = "id"
tagIdx = "index"
tagUniqueIdx = "unique"
tagInline = "inline"
tagIncrement = "increment"
indexPrefix = "__storm_index_"
)
type fieldConfig struct {
Name string
Index string
IsZero bool
IsID bool
Increment bool
IncrementStart int64
IsInteger bool
Value *reflect.Value
}
// structConfig is a structure gathering all the relevant information about a model
type structConfig struct {
Name string
Fields map[string]*fieldConfig
ID *fieldConfig
}
func extract(s *reflect.Value, mi ...*structConfig) (*structConfig, error) {
if s.Kind() == reflect.Ptr {
e := s.Elem()
s = &e
}
if s.Kind() != reflect.Struct {
return nil, ErrBadType
}
typ := s.Type()
var child bool
var m *structConfig
if len(mi) > 0 {
m = mi[0]
child = true
} else {
m = &structConfig{}
m.Fields = make(map[string]*fieldConfig)
}
if m.Name == "" {
m.Name = typ.Name()
}
numFields := s.NumField()
for i := 0; i < numFields; i++ {
field := typ.Field(i)
value := s.Field(i)
if field.PkgPath != "" {
continue
}
err := extractField(&value, &field, m, child)
if err != nil {
return nil, err
}
}
if child {
return m, nil
}
if m.ID == nil {
return nil, ErrNoID
}
if m.Name == "" {
return nil, ErrNoName
}
return m, nil
}
func extractField(value *reflect.Value, field *reflect.StructField, m *structConfig, isChild bool) error {
var f *fieldConfig
var err error
tag := field.Tag.Get("storm")
if tag != "" {
f = &fieldConfig{
Name: field.Name,
IsZero: isZero(value),
IsInteger: isInteger(value),
Value: value,
IncrementStart: 1,
}
tags := strings.Split(tag, ",")
for _, tag := range tags {
switch tag {
case "id":
f.IsID = true
case tagUniqueIdx, tagIdx:
f.Index = tag
case tagInline:
if value.Kind() == reflect.Ptr {
e := value.Elem()
value = &e
}
if value.Kind() == reflect.Struct {
a := value.Addr()
_, err := extract(&a, m)
if err != nil {
return err
}
}
// we don't need to save this field
return nil
default:
if strings.HasPrefix(tag, tagIncrement) {
f.Increment = true
parts := strings.Split(tag, "=")
if parts[0] != tagIncrement {
return ErrUnknownTag
}
if len(parts) > 1 {
f.IncrementStart, err = strconv.ParseInt(parts[1], 0, 64)
if err != nil {
return err
}
}
} else {
return ErrUnknownTag
}
}
}
if _, ok := m.Fields[f.Name]; !ok || !isChild {
m.Fields[f.Name] = f
}
}
if m.ID == nil && f != nil && f.IsID {
m.ID = f
}
// the field is named ID and no ID field has been detected before
if m.ID == nil && field.Name == "ID" {
if f == nil {
f = &fieldConfig{
Name: field.Name,
IsZero: isZero(value),
IsInteger: isInteger(value),
IsID: true,
Value: value,
IncrementStart: 1,
}
m.Fields[field.Name] = f
}
m.ID = f
}
return nil
}
func extractSingleField(ref *reflect.Value, fieldName string) (*structConfig, error) {
var cfg structConfig
cfg.Fields = make(map[string]*fieldConfig)
f, ok := ref.Type().FieldByName(fieldName)
if !ok || f.PkgPath != "" {
return nil, fmt.Errorf("field %s not found", fieldName)
}
v := ref.FieldByName(fieldName)
err := extractField(&v, &f, &cfg, false)
if err != nil {
return nil, err
}
return &cfg, nil
}
func getIndex(bucket *bolt.Bucket, idxKind string, fieldName string) (index.Index, error) {
var idx index.Index
var err error
switch idxKind {
case tagUniqueIdx:
idx, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
case tagIdx:
idx, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
default:
err = ErrIdxNotFound
}
return idx, err
}
func isZero(v *reflect.Value) bool {
zero := reflect.Zero(v.Type()).Interface()
current := v.Interface()
return reflect.DeepEqual(current, zero)
}
func isInteger(v *reflect.Value) bool {
if v == nil {
return false
}
kind := v.Kind()
return kind >= reflect.Int && kind <= reflect.Uint64
}
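The tag constants above drive extraction. A hedged sketch of a model exercising each of them (the User and Address types are illustrative, not part of this commit):

package main

import (
	"fmt"
	"time"
)

// User exercises every tag handled by extractField: "id" marks the primary
// key, "increment" auto-assigns it from a counter (optionally "increment=N"
// for a custom start), "unique" maintains a UniqueIndex, "index" maintains a
// ListIndex, and "inline" flattens an embedded struct into the parent.
type User struct {
	ID        int    `storm:"id,increment"`
	Email     string `storm:"unique"`
	Group     string `storm:"index"`
	CreatedAt time.Time
	Address   `storm:"inline"`
}

type Address struct {
	City string `storm:"index"`
}

func main() {
	fmt.Printf("%+v\n", User{Email: "ada@example.com", Address: Address{City: "London"}})
}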

vendor/github.com/asdine/storm/finder.go generated vendored Normal file
@@ -0,0 +1,416 @@
package storm
import (
"reflect"
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
)
// A Finder can fetch types from BoltDB
type Finder interface {
// One returns one record by the specified index
One(fieldName string, value interface{}, to interface{}) error
// Find returns one or more records by the specified index
Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error
// AllByIndex gets all the records of a bucket that are indexed in the specified index
AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error
// All gets all the records of a bucket.
// If there are no records it returns no error and the 'to' parameter is set to an empty slice.
All(to interface{}, options ...func(*index.Options)) error
// Select a list of records that match a list of matchers. Doesn't use indexes.
Select(matchers ...q.Matcher) Query
// Range returns one or more records by the specified index within the specified range
Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error
// Count counts all the records of a bucket
Count(data interface{}) (int, error)
}
// One returns one record by the specified index
func (n *node) One(fieldName string, value interface{}, to interface{}) error {
sink, err := newFirstSink(n, to)
if err != nil {
return err
}
bucketName := sink.bucketName()
if bucketName == "" {
return ErrNoName
}
if fieldName == "" {
return ErrNotFound
}
ref := reflect.Indirect(sink.ref)
cfg, err := extractSingleField(&ref, fieldName)
if err != nil {
return err
}
field, ok := cfg.Fields[fieldName]
if !ok || (!field.IsID && field.Index == "") {
query := newQuery(n, q.StrictEq(fieldName, value))
if n.tx != nil {
err = query.query(n.tx, sink)
} else {
err = n.s.Bolt.View(func(tx *bolt.Tx) error {
return query.query(tx, sink)
})
}
if err != nil {
return err
}
return sink.flush()
}
val, err := toBytes(value, n.s.codec)
if err != nil {
return err
}
return n.readTx(func(tx *bolt.Tx) error {
return n.one(tx, bucketName, fieldName, cfg, to, val, field.IsID)
})
}
func (n *node) one(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, to interface{}, val []byte, skipIndex bool) error {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
return ErrNotFound
}
var id []byte
if !skipIndex {
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
if err != nil {
if err == index.ErrNotFound {
return ErrNotFound
}
return err
}
id = idx.Get(val)
} else {
id = val
}
if id == nil {
return ErrNotFound
}
raw := bucket.Get(id)
if raw == nil {
return ErrNotFound
}
return n.s.codec.Unmarshal(raw, to)
}
// Find returns one or more records by the specified index
func (n *node) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
sink, err := newListSink(n, to)
if err != nil {
return err
}
bucketName := sink.bucketName()
if bucketName == "" {
return ErrNoName
}
ref := reflect.Indirect(reflect.New(sink.elemType))
cfg, err := extractSingleField(&ref, fieldName)
if err != nil {
return err
}
opts := index.NewOptions()
for _, fn := range options {
fn(opts)
}
field, ok := cfg.Fields[fieldName]
if !ok || (!field.IsID && (field.Index == "" || value == nil)) {
sink.limit = opts.Limit
sink.skip = opts.Skip
query := newQuery(n, q.Eq(fieldName, value))
if opts.Reverse {
query.Reverse()
}
err = n.readTx(func(tx *bolt.Tx) error {
return query.query(tx, sink)
})
if err != nil {
return err
}
return sink.flush()
}
val, err := toBytes(value, n.s.codec)
if err != nil {
return err
}
return n.readTx(func(tx *bolt.Tx) error {
return n.find(tx, bucketName, fieldName, cfg, sink, val, opts)
})
}
func (n *node) find(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, val []byte, opts *index.Options) error {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
return ErrNotFound
}
sorter := newSorter(n)
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
if err != nil {
return err
}
list, err := idx.All(val, opts)
if err != nil {
if err == index.ErrNotFound {
return ErrNotFound
}
return err
}
sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
for i := range list {
raw := bucket.Get(list[i])
if raw == nil {
return ErrNotFound
}
_, err = sorter.filter(sink, nil, bucket, list[i], raw)
if err != nil {
return err
}
}
return sink.flush()
}
// AllByIndex gets all the records of a bucket that are indexed in the specified index
func (n *node) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
if fieldName == "" {
return n.All(to, options...)
}
ref := reflect.ValueOf(to)
if ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Slice {
return ErrSlicePtrNeeded
}
typ := reflect.Indirect(ref).Type().Elem()
if typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
newElem := reflect.New(typ)
cfg, err := extract(&newElem)
if err != nil {
return err
}
if cfg.ID.Name == fieldName {
return n.All(to, options...)
}
opts := index.NewOptions()
for _, fn := range options {
fn(opts)
}
return n.readTx(func(tx *bolt.Tx) error {
return n.allByIndex(tx, fieldName, cfg, &ref, opts)
})
}
func (n *node) allByIndex(tx *bolt.Tx, fieldName string, cfg *structConfig, ref *reflect.Value, opts *index.Options) error {
bucket := n.GetBucket(tx, cfg.Name)
if bucket == nil {
return ErrNotFound
}
fieldCfg, ok := cfg.Fields[fieldName]
if !ok {
return ErrNotFound
}
idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
if err != nil {
return err
}
list, err := idx.AllRecords(opts)
if err != nil {
if err == index.ErrNotFound {
return ErrNotFound
}
return err
}
results := reflect.MakeSlice(reflect.Indirect(*ref).Type(), len(list), len(list))
for i := range list {
raw := bucket.Get(list[i])
if raw == nil {
return ErrNotFound
}
err = n.s.codec.Unmarshal(raw, results.Index(i).Addr().Interface())
if err != nil {
return err
}
}
reflect.Indirect(*ref).Set(results)
return nil
}
// All gets all the records of a bucket.
// If there are no records it returns no error and the 'to' parameter is set to an empty slice.
func (n *node) All(to interface{}, options ...func(*index.Options)) error {
opts := index.NewOptions()
for _, fn := range options {
fn(opts)
}
query := newQuery(n, nil).Limit(opts.Limit).Skip(opts.Skip)
if opts.Reverse {
query.Reverse()
}
err := query.Find(to)
if err != nil && err != ErrNotFound {
return err
}
if err == ErrNotFound {
ref := reflect.ValueOf(to)
results := reflect.MakeSlice(reflect.Indirect(ref).Type(), 0, 0)
reflect.Indirect(ref).Set(results)
}
return nil
}
// Range returns one or more records by the specified index within the specified range
func (n *node) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
sink, err := newListSink(n, to)
if err != nil {
return err
}
bucketName := sink.bucketName()
if bucketName == "" {
return ErrNoName
}
ref := reflect.Indirect(reflect.New(sink.elemType))
cfg, err := extractSingleField(&ref, fieldName)
if err != nil {
return err
}
opts := index.NewOptions()
for _, fn := range options {
fn(opts)
}
field, ok := cfg.Fields[fieldName]
if !ok || (!field.IsID && field.Index == "") {
sink.limit = opts.Limit
sink.skip = opts.Skip
query := newQuery(n, q.And(q.Gte(fieldName, min), q.Lte(fieldName, max)))
if opts.Reverse {
query.Reverse()
}
err = n.readTx(func(tx *bolt.Tx) error {
return query.query(tx, sink)
})
if err != nil {
return err
}
return sink.flush()
}
mn, err := toBytes(min, n.s.codec)
if err != nil {
return err
}
mx, err := toBytes(max, n.s.codec)
if err != nil {
return err
}
return n.readTx(func(tx *bolt.Tx) error {
return n.rnge(tx, bucketName, fieldName, cfg, sink, mn, mx, opts)
})
}
func (n *node) rnge(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig, sink *listSink, min, max []byte, opts *index.Options) error {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
reflect.Indirect(sink.ref).SetLen(0)
return nil
}
sorter := newSorter(n)
idx, err := getIndex(bucket, cfg.Fields[fieldName].Index, fieldName)
if err != nil {
return err
}
list, err := idx.Range(min, max, opts)
if err != nil {
return err
}
sink.results = reflect.MakeSlice(reflect.Indirect(sink.ref).Type(), len(list), len(list))
for i := range list {
raw := bucket.Get(list[i])
if raw == nil {
return ErrNotFound
}
_, err = sorter.filter(sink, nil, bucket, list[i], raw)
if err != nil {
return err
}
}
return sink.flush()
}
// Count counts all the records of a bucket
func (n *node) Count(data interface{}) (int, error) {
return n.Select().Count(data)
}
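A hedged end-to-end sketch of the Finder surface; storm.Open, DB.Close and DB.Save come from parts of the package outside this diff:

package main

import (
	"fmt"

	"github.com/asdine/storm"
)

type User struct {
	ID    int    `storm:"id,increment"`
	Name  string `storm:"index"`
	Email string `storm:"unique"`
}

func main() {
	db, err := storm.Open("my.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	if err := db.Save(&User{Name: "ada", Email: "ada@example.com"}); err != nil {
		panic(err)
	}

	var one User
	if err := db.One("Email", "ada@example.com", &one); err != nil {
		panic(err) // storm.ErrNotFound when no record matches
	}

	var many []User
	if err := db.Find("Name", "ada", &many); err != nil {
		panic(err)
	}

	total, err := db.Count(&User{})
	if err != nil {
		panic(err)
	}
	fmt.Println(one.ID, len(many), total)
}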

vendor/github.com/asdine/storm/index/errors.go generated vendored Normal file
@@ -0,0 +1,14 @@
package index
import "errors"
var (
// ErrNotFound is returned when the specified record is not saved in the bucket.
ErrNotFound = errors.New("not found")
// ErrAlreadyExists is returned when trying to set an existing value on a field that has a unique index.
ErrAlreadyExists = errors.New("already exists")
// ErrNilParam is returned when the specified param is expected to be not nil.
ErrNilParam = errors.New("param must not be nil")
)

vendor/github.com/asdine/storm/index/indexes.go generated vendored Normal file
@@ -0,0 +1,13 @@
// Package index contains Index engines used to store values and their corresponding IDs
package index
// Index interface
type Index interface {
Add(value []byte, targetID []byte) error
Remove(value []byte) error
RemoveID(id []byte) error
Get(value []byte) []byte
All(value []byte, opts *Options) ([][]byte, error)
AllRecords(opts *Options) ([][]byte, error)
Range(min []byte, max []byte, opts *Options) ([][]byte, error)
}

vendor/github.com/asdine/storm/index/list.go generated vendored Normal file
@@ -0,0 +1,245 @@
package index
import (
"bytes"
"github.com/asdine/storm/internal"
"github.com/boltdb/bolt"
)
// NewListIndex loads a ListIndex
func NewListIndex(parent *bolt.Bucket, indexName []byte) (*ListIndex, error) {
var err error
b := parent.Bucket(indexName)
if b == nil {
if !parent.Writable() {
return nil, ErrNotFound
}
b, err = parent.CreateBucket(indexName)
if err != nil {
return nil, err
}
}
ids, err := NewUniqueIndex(b, []byte("storm__ids"))
if err != nil {
return nil, err
}
return &ListIndex{
IndexBucket: b,
Parent: parent,
IDs: ids,
}, nil
}
// ListIndex is an index that references values and the corresponding IDs.
type ListIndex struct {
Parent *bolt.Bucket
IndexBucket *bolt.Bucket
IDs *UniqueIndex
}
// Add a value to the list index
func (idx *ListIndex) Add(newValue []byte, targetID []byte) error {
if len(newValue) == 0 {
return ErrNilParam
}
if len(targetID) == 0 {
return ErrNilParam
}
key := idx.IDs.Get(targetID)
if key != nil {
err := idx.IndexBucket.Delete(key)
if err != nil {
return err
}
err = idx.IDs.Remove(targetID)
if err != nil {
return err
}
key = key[:0]
}
key = append(key, newValue...)
key = append(key, '_')
key = append(key, '_')
key = append(key, targetID...)
err := idx.IDs.Add(targetID, key)
if err != nil {
return err
}
return idx.IndexBucket.Put(key, targetID)
}
// Remove a value from the list index
func (idx *ListIndex) Remove(value []byte) error {
var err error
var keys [][]byte
c := idx.IndexBucket.Cursor()
prefix := generatePrefix(value)
for k, _ := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, _ = c.Next() {
keys = append(keys, k)
}
for _, k := range keys {
err = idx.IndexBucket.Delete(k)
if err != nil {
return err
}
}
return idx.IDs.RemoveID(value)
}
// RemoveID removes an ID from the list index
func (idx *ListIndex) RemoveID(targetID []byte) error {
value := idx.IDs.Get(targetID)
if value == nil {
return nil
}
err := idx.IndexBucket.Delete(value)
if err != nil {
return err
}
return idx.IDs.Remove(targetID)
}
// Get the first ID corresponding to the given value
func (idx *ListIndex) Get(value []byte) []byte {
c := idx.IndexBucket.Cursor()
prefix := generatePrefix(value)
for k, id := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, id = c.Next() {
return id
}
return nil
}
// All the IDs corresponding to the given value
func (idx *ListIndex) All(value []byte, opts *Options) ([][]byte, error) {
var list [][]byte
c := idx.IndexBucket.Cursor()
cur := internal.Cursor{C: c, Reverse: opts != nil && opts.Reverse}
prefix := generatePrefix(value)
k, id := c.Seek(prefix)
if cur.Reverse {
var count int
for ; bytes.HasPrefix(k, prefix) && k != nil; k, _ = c.Next() {
count++
}
k, id = c.Prev()
list = make([][]byte, 0, count)
}
for ; bytes.HasPrefix(k, prefix); k, id = cur.Next() {
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, id)
}
return list, nil
}
// AllRecords returns all the IDs of this index
func (idx *ListIndex) AllRecords(opts *Options) ([][]byte, error) {
var list [][]byte
c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}
for k, id := c.First(); k != nil; k, id = c.Next() {
if id == nil || bytes.Equal(k, []byte("storm__ids")) {
continue
}
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, id)
}
return list, nil
}
// Range returns the ids corresponding to the given range of values
func (idx *ListIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
var list [][]byte
c := internal.RangeCursor{
C: idx.IndexBucket.Cursor(),
Reverse: opts != nil && opts.Reverse,
Min: min,
Max: max,
CompareFn: func(val, limit []byte) int {
pos := bytes.LastIndex(val, []byte("__"))
return bytes.Compare(val[:pos], limit)
},
}
for k, id := c.First(); c.Continue(k); k, id = c.Next() {
if id == nil || bytes.Equal(k, []byte("storm__ids")) {
continue
}
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, id)
}
return list, nil
}
func generatePrefix(value []byte) []byte {
prefix := make([]byte, len(value)+2)
copy(prefix, value)
prefix[len(value)] = '_'
prefix[len(value)+1] = '_'
return prefix
}
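The bucket keys built by Add are therefore value + "__" + id, which is exactly the layout generatePrefix targets during prefix seeks. A small illustration (listKey is a hypothetical mirror of the key construction above, not an exported helper):

package main

import "fmt"

// listKey mirrors how ListIndex.Add composes its composite bucket keys.
func listKey(value, id []byte) []byte {
	key := append([]byte{}, value...)
	key = append(key, '_', '_')
	return append(key, id...)
}

func main() {
	fmt.Printf("%s\n", listKey([]byte("ada"), []byte("42"))) // ada__42
}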

vendor/github.com/asdine/storm/index/options.go generated vendored Normal file
@@ -0,0 +1,15 @@
package index
// NewOptions creates initialized Options
func NewOptions() *Options {
return &Options{
Limit: -1,
}
}
// Options are used to customize queries
type Options struct {
Limit int
Skip int
Reverse bool
}
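Queries mutate an Options value through variadic functions; this mirrors how the Finder methods earlier in this commit apply their option arguments. A minimal sketch:

package main

import (
	"fmt"

	"github.com/asdine/storm/index"
)

func main() {
	opts := index.NewOptions()
	// Each option is just a function that mutates the Options value.
	for _, fn := range []func(*index.Options){
		func(o *index.Options) { o.Limit = 10 },
		func(o *index.Options) { o.Reverse = true },
	} {
		fn(opts)
	}
	fmt.Println(opts.Limit, opts.Skip, opts.Reverse) // 10 0 true
}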

vendor/github.com/asdine/storm/index/unique.go generated vendored Normal file
@@ -0,0 +1,154 @@
package index
import (
"bytes"
"github.com/asdine/storm/internal"
"github.com/boltdb/bolt"
)
// NewUniqueIndex loads a UniqueIndex
func NewUniqueIndex(parent *bolt.Bucket, indexName []byte) (*UniqueIndex, error) {
var err error
b := parent.Bucket(indexName)
if b == nil {
if !parent.Writable() {
return nil, ErrNotFound
}
b, err = parent.CreateBucket(indexName)
if err != nil {
return nil, err
}
}
return &UniqueIndex{
IndexBucket: b,
Parent: parent,
}, nil
}
// UniqueIndex is an index that references unique values and the corresponding ID.
type UniqueIndex struct {
Parent *bolt.Bucket
IndexBucket *bolt.Bucket
}
// Add a value to the unique index
func (idx *UniqueIndex) Add(value []byte, targetID []byte) error {
if len(value) == 0 {
return ErrNilParam
}
if len(targetID) == 0 {
return ErrNilParam
}
exists := idx.IndexBucket.Get(value)
if exists != nil {
if bytes.Equal(exists, targetID) {
return nil
}
return ErrAlreadyExists
}
return idx.IndexBucket.Put(value, targetID)
}
// Remove a value from the unique index
func (idx *UniqueIndex) Remove(value []byte) error {
return idx.IndexBucket.Delete(value)
}
// RemoveID removes an ID from the unique index
func (idx *UniqueIndex) RemoveID(id []byte) error {
c := idx.IndexBucket.Cursor()
for val, ident := c.First(); val != nil; val, ident = c.Next() {
if bytes.Equal(ident, id) {
return idx.Remove(val)
}
}
return nil
}
// Get the id corresponding to the given value
func (idx *UniqueIndex) Get(value []byte) []byte {
return idx.IndexBucket.Get(value)
}
// All returns all the ids corresponding to the given value
func (idx *UniqueIndex) All(value []byte, opts *Options) ([][]byte, error) {
id := idx.IndexBucket.Get(value)
if id != nil {
return [][]byte{id}, nil
}
return nil, nil
}
// AllRecords returns all the IDs of this index
func (idx *UniqueIndex) AllRecords(opts *Options) ([][]byte, error) {
var list [][]byte
c := internal.Cursor{C: idx.IndexBucket.Cursor(), Reverse: opts != nil && opts.Reverse}
for val, ident := c.First(); val != nil; val, ident = c.Next() {
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, ident)
}
return list, nil
}
// Range returns the ids corresponding to the given range of values
func (idx *UniqueIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
var list [][]byte
c := internal.RangeCursor{
C: idx.IndexBucket.Cursor(),
Reverse: opts != nil && opts.Reverse,
Min: min,
Max: max,
CompareFn: func(val, limit []byte) int {
return bytes.Compare(val, limit)
},
}
for val, ident := c.First(); val != nil && c.Continue(val); val, ident = c.Next() {
if opts != nil && opts.Skip > 0 {
opts.Skip--
continue
}
if opts != nil && opts.Limit == 0 {
break
}
if opts != nil && opts.Limit > 0 {
opts.Limit--
}
list = append(list, ident)
}
return list, nil
}
// first returns the first ID of this index
func (idx *UniqueIndex) first() []byte {
c := idx.IndexBucket.Cursor()
for val, ident := c.First(); val != nil; val, ident = c.Next() {
return ident
}
return nil
}

vendor/github.com/asdine/storm/internal/boltdb.go generated vendored Normal file
@@ -0,0 +1,63 @@
package internal
import "github.com/boltdb/bolt"
// Cursor that can be reversed
type Cursor struct {
C *bolt.Cursor
Reverse bool
}
// First element
func (c *Cursor) First() ([]byte, []byte) {
if c.Reverse {
return c.C.Last()
}
return c.C.First()
}
// Next element
func (c *Cursor) Next() ([]byte, []byte) {
if c.Reverse {
return c.C.Prev()
}
return c.C.Next()
}
// RangeCursor that can be reversed
type RangeCursor struct {
C *bolt.Cursor
Reverse bool
Min []byte
Max []byte
CompareFn func([]byte, []byte) int
}
// First element
func (c *RangeCursor) First() ([]byte, []byte) {
if c.Reverse {
return c.C.Seek(c.Max)
}
return c.C.Seek(c.Min)
}
// Next element
func (c *RangeCursor) Next() ([]byte, []byte) {
if c.Reverse {
return c.C.Prev()
}
return c.C.Next()
}
// Continue tells if the loop needs to continue
func (c *RangeCursor) Continue(val []byte) bool {
if c.Reverse {
return val != nil && c.CompareFn(val, c.Min) >= 0
}
return val != nil && c.CompareFn(val, c.Max) <= 0
}
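A hedged sketch of walking a Bolt bucket backwards through internal.Cursor; the bucket setup is the minimum needed to run it:

package main

import (
	"fmt"

	"github.com/asdine/storm/internal"
	"github.com/boltdb/bolt"
)

func main() {
	db, err := bolt.Open("cursor.db", 0600, nil)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("letters"))
		if err != nil {
			return err
		}
		for _, k := range []string{"a", "b", "c"} {
			if err := b.Put([]byte(k), []byte(k)); err != nil {
				return err
			}
		}
		// Reverse makes First start at Last and Next walk with Prev.
		c := internal.Cursor{C: b.Cursor(), Reverse: true}
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			fmt.Printf("%s ", k) // c b a
		}
		return nil
	})
	if err != nil {
		panic(err)
	}
}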

vendor/github.com/asdine/storm/kv.go generated vendored Normal file
@@ -0,0 +1,145 @@
package storm
import (
"reflect"
"github.com/boltdb/bolt"
)
// KeyValueStore can store and fetch values by key
type KeyValueStore interface {
// Get a value from a bucket
Get(bucketName string, key interface{}, to interface{}) error
// Set a key/value pair into a bucket
Set(bucketName string, key interface{}, value interface{}) error
// Delete deletes a key from a bucket
Delete(bucketName string, key interface{}) error
// GetBytes gets a raw value from a bucket.
GetBytes(bucketName string, key interface{}) ([]byte, error)
// SetBytes sets a raw value into a bucket.
SetBytes(bucketName string, key interface{}, value []byte) error
}
// GetBytes gets a raw value from a bucket.
func (n *node) GetBytes(bucketName string, key interface{}) ([]byte, error) {
id, err := toBytes(key, n.s.codec)
if err != nil {
return nil, err
}
var val []byte
return val, n.readTx(func(tx *bolt.Tx) error {
raw, err := n.getBytes(tx, bucketName, id)
if err != nil {
return err
}
val = make([]byte, len(raw))
copy(val, raw)
return nil
})
}
// getBytes gets a raw value from a bucket within the given transaction.
func (n *node) getBytes(tx *bolt.Tx, bucketName string, id []byte) ([]byte, error) {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
return nil, ErrNotFound
}
raw := bucket.Get(id)
if raw == nil {
return nil, ErrNotFound
}
return raw, nil
}
// SetBytes sets a raw value into a bucket.
func (n *node) SetBytes(bucketName string, key interface{}, value []byte) error {
if key == nil {
return ErrNilParam
}
id, err := toBytes(key, n.s.codec)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.setBytes(tx, bucketName, id, value)
})
}
func (n *node) setBytes(tx *bolt.Tx, bucketName string, id, data []byte) error {
bucket, err := n.CreateBucketIfNotExists(tx, bucketName)
if err != nil {
return err
}
// save node configuration in the bucket
_, err = newMeta(bucket, n)
if err != nil {
return err
}
return bucket.Put(id, data)
}
// Get a value from a bucket
func (n *node) Get(bucketName string, key interface{}, to interface{}) error {
ref := reflect.ValueOf(to)
if !ref.IsValid() || ref.Kind() != reflect.Ptr {
return ErrPtrNeeded
}
id, err := toBytes(key, n.s.codec)
if err != nil {
return err
}
return n.readTx(func(tx *bolt.Tx) error {
raw, err := n.getBytes(tx, bucketName, id)
if err != nil {
return err
}
return n.s.codec.Unmarshal(raw, to)
})
}
// Set a key/value pair into a bucket
func (n *node) Set(bucketName string, key interface{}, value interface{}) error {
var data []byte
var err error
if value != nil {
data, err = n.s.codec.Marshal(value)
if err != nil {
return err
}
}
return n.SetBytes(bucketName, key, data)
}
// Delete deletes a key from a bucket
func (n *node) Delete(bucketName string, key interface{}) error {
id, err := toBytes(key, n.s.codec)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.delete(tx, bucketName, id)
})
}
func (n *node) delete(tx *bolt.Tx, bucketName string, id []byte) error {
bucket := n.GetBucket(tx, bucketName)
if bucket == nil {
return ErrNotFound
}
return bucket.Delete(id)
}
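A hedged sketch of the key/value surface; storm.Open and DB.Close come from parts of the package outside this diff:

package main

import (
	"fmt"

	"github.com/asdine/storm"
)

func main() {
	db, err := storm.Open("kv.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Keys and values pass through the node's codec (JSON by default).
	if err := db.Set("sessions", "token-1", map[string]string{"user": "ada"}); err != nil {
		panic(err)
	}
	var sess map[string]string
	if err := db.Get("sessions", "token-1", &sess); err != nil {
		panic(err)
	}
	fmt.Println(sess["user"]) // ada

	if err := db.Delete("sessions", "token-1"); err != nil {
		panic(err)
	}
}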

vendor/github.com/asdine/storm/metadata.go generated vendored Normal file
@@ -0,0 +1,69 @@
package storm
import (
"reflect"
"github.com/boltdb/bolt"
)
const (
metaCodec = "codec"
)
func newMeta(b *bolt.Bucket, n Node) (*meta, error) {
m := b.Bucket([]byte(metadataBucket))
if m != nil {
name := m.Get([]byte(metaCodec))
if string(name) != n.Codec().Name() {
return nil, ErrDifferentCodec
}
return &meta{
node: n,
bucket: m,
}, nil
}
m, err := b.CreateBucket([]byte(metadataBucket))
if err != nil {
return nil, err
}
m.Put([]byte(metaCodec), []byte(n.Codec().Name()))
return &meta{
node: n,
bucket: m,
}, nil
}
type meta struct {
node Node
bucket *bolt.Bucket
}
func (m *meta) increment(field *fieldConfig) error {
var err error
counter := field.IncrementStart
raw := m.bucket.Get([]byte(field.Name + "counter"))
if raw != nil {
counter, err = numberfromb(raw)
if err != nil {
return err
}
counter++
}
raw, err = numbertob(counter)
if err != nil {
return err
}
err = m.bucket.Put([]byte(field.Name+"counter"), raw)
if err != nil {
return err
}
field.Value.Set(reflect.ValueOf(counter).Convert(field.Value.Type()))
field.IsZero = false
return nil
}

vendor/github.com/asdine/storm/node.go generated vendored Normal file
@@ -0,0 +1,125 @@
package storm
import (
"github.com/asdine/storm/codec"
"github.com/boltdb/bolt"
)
// A Node in Storm represents the API to a BoltDB bucket.
type Node interface {
Tx
TypeStore
KeyValueStore
BucketScanner
// From returns a new Storm node with a new bucket root below the current.
// All DB operations on the new node will be executed relative to this bucket.
From(addend ...string) Node
// Bucket returns the bucket name as a slice from the root.
// In the normal, simple case this will be empty.
Bucket() []string
// GetBucket returns the given bucket below the current node.
GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.
CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error)
// WithTransaction returns a new Storm node that will use the given transaction.
WithTransaction(tx *bolt.Tx) Node
// Begin starts a new transaction.
Begin(writable bool) (Node, error)
// Codec used by this instance of Storm
Codec() codec.MarshalUnmarshaler
// WithCodec returns a new Storm Node that will use the given Codec.
WithCodec(codec codec.MarshalUnmarshaler) Node
// WithBatch returns a new Storm Node with the batch mode enabled.
WithBatch(enabled bool) Node
}
// node is the Storm implementation of the Node interface.
type node struct {
s *DB
// The root bucket. In the normal, simple case this will be empty.
rootBucket []string
// Transaction object. Nil if not in transaction
tx *bolt.Tx
// Codec of this node
codec codec.MarshalUnmarshaler
// Enable batch mode for read-write transaction, instead of update mode
batchMode bool
}
// From returns a new Storm Node with a new bucket root below the current.
// All DB operations on the new node will be executed relative to this bucket.
func (n node) From(addend ...string) Node {
n.rootBucket = append(n.rootBucket, addend...)
return &n
}
// WithTransaction returns a new Storm Node that will use the given transaction.
func (n node) WithTransaction(tx *bolt.Tx) Node {
n.tx = tx
return &n
}
// WithCodec returns a new Storm Node that will use the given Codec.
func (n node) WithCodec(codec codec.MarshalUnmarshaler) Node {
n.codec = codec
return &n
}
// WithBatch returns a new Storm Node with the batch mode enabled.
func (n node) WithBatch(enabled bool) Node {
n.batchMode = enabled
return &n
}
// Bucket returns the bucket name as a slice from the root.
// In the normal, simple case this will be empty.
func (n *node) Bucket() []string {
return n.rootBucket
}
// Codec returns the MarshalUnmarshaler used by this instance of Storm
func (n *node) Codec() codec.MarshalUnmarshaler {
return n.codec
}
// Detects if already in a transaction or runs a read-write transaction.
// Uses batch mode if enabled.
func (n *node) readWriteTx(fn func(tx *bolt.Tx) error) error {
if n.tx != nil {
return fn(n.tx)
}
if n.batchMode {
return n.s.Bolt.Batch(func(tx *bolt.Tx) error {
return fn(tx)
})
}
return n.s.Bolt.Update(func(tx *bolt.Tx) error {
return fn(tx)
})
}
// Detects if already in a transaction or runs a read transaction.
func (n *node) readTx(fn func(tx *bolt.Tx) error) error {
if n.tx != nil {
return fn(n.tx)
}
return n.s.Bolt.View(func(tx *bolt.Tx) error {
return fn(tx)
})
}
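A hedged sketch of scoping work with From and an explicit transaction; Save, Commit and Rollback belong to the Tx and TypeStore interfaces defined elsewhere in the package:

package main

import "github.com/asdine/storm"

type Event struct {
	ID int `storm:"id,increment"`
}

func main() {
	db, err := storm.Open("nodes.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Every operation on this node runs below the tenant1/events bucket chain.
	events := db.From("tenant1", "events")

	// Begin returns a node bound to a single read-write transaction.
	tx, err := events.Begin(true)
	if err != nil {
		panic(err)
	}
	defer tx.Rollback() // deferred; its error is discarded if Commit already closed the tx

	if err := tx.Save(&Event{}); err != nil {
		panic(err)
	}
	if err := tx.Commit(); err != nil {
		panic(err)
	}
}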

vendor/github.com/asdine/storm/options.go generated vendored Normal file
@@ -0,0 +1,82 @@
package storm
import (
"os"
"github.com/asdine/storm/codec"
"github.com/asdine/storm/index"
"github.com/boltdb/bolt"
)
// BoltOptions used to pass options to BoltDB.
func BoltOptions(mode os.FileMode, options *bolt.Options) func(*DB) error {
return func(d *DB) error {
d.boltMode = mode
d.boltOptions = options
return nil
}
}
// Codec used to set a custom encoder and decoder. The default is JSON.
func Codec(c codec.MarshalUnmarshaler) func(*DB) error {
return func(d *DB) error {
d.codec = c
return nil
}
}
// Batch enables the use of batch instead of update for read-write transactions.
func Batch() func(*DB) error {
return func(d *DB) error {
d.batchMode = true
return nil
}
}
// AutoIncrement used to enable bolt.NextSequence on empty integer ids.
// Deprecated: set the increment tag on the id field instead.
func AutoIncrement() func(*DB) error {
return func(d *DB) error {
d.autoIncrement = true
return nil
}
}
// Root used to set the root bucket. See also the From method.
func Root(root ...string) func(*DB) error {
return func(d *DB) error {
d.rootBucket = root
return nil
}
}
// UseDB allows Storm to use an existing open bolt.DB.
// Warning: storm.DB.Close() will close the bolt.DB instance.
func UseDB(b *bolt.DB) func(*DB) error {
return func(d *DB) error {
d.Path = b.Path()
d.Bolt = b
return nil
}
}
// Limit sets the maximum number of records to return
func Limit(limit int) func(*index.Options) {
return func(opts *index.Options) {
opts.Limit = limit
}
}
// Skip sets the number of records to skip
func Skip(offset int) func(*index.Options) {
return func(opts *index.Options) {
opts.Skip = offset
}
}
// Reverse will return the results in descending order
func Reverse() func(*index.Options) {
return func(opts *index.Options) {
opts.Reverse = true
}
}
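A hedged sketch combining these options at open time; storm.Open itself is defined elsewhere in the package:

package main

import (
	"time"

	"github.com/asdine/storm"
	"github.com/asdine/storm/codec/json"
	"github.com/boltdb/bolt"
)

func main() {
	db, err := storm.Open("app.db",
		storm.BoltOptions(0600, &bolt.Options{Timeout: time.Second}),
		storm.Codec(json.Codec), // explicit here, though JSON is already the default
		storm.Batch(),
		storm.Root("myapp", "v1"),
	)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}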

vendor/github.com/asdine/storm/q/compare.go generated vendored Normal file
@@ -0,0 +1,78 @@
package q
import (
"go/constant"
"go/token"
"reflect"
"strconv"
)
func compare(a, b interface{}, tok token.Token) bool {
vala := reflect.ValueOf(a)
valb := reflect.ValueOf(b)
ak := vala.Kind()
bk := valb.Kind()
switch {
// comparing nil values
case (ak == reflect.Ptr || ak == reflect.Slice || ak == reflect.Interface || ak == reflect.Invalid) &&
(bk == reflect.Ptr || bk == reflect.Slice || bk == reflect.Interface || bk == reflect.Invalid) &&
(!vala.IsValid() || vala.IsNil()) && (!valb.IsValid() || valb.IsNil()):
return true
case ak >= reflect.Int && ak <= reflect.Int64:
if bk >= reflect.Int && bk <= reflect.Int64 {
return constant.Compare(constant.MakeInt64(vala.Int()), tok, constant.MakeInt64(valb.Int()))
}
if bk == reflect.Float32 || bk == reflect.Float64 {
return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(valb.Float()))
}
if bk == reflect.String {
bla, err := strconv.ParseFloat(valb.String(), 64)
if err != nil {
return false
}
return constant.Compare(constant.MakeFloat64(float64(vala.Int())), tok, constant.MakeFloat64(bla))
}
case ak == reflect.Float32 || ak == reflect.Float64:
if bk == reflect.Float32 || bk == reflect.Float64 {
return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(valb.Float()))
}
if bk >= reflect.Int && bk <= reflect.Int64 {
return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(float64(valb.Int())))
}
if bk == reflect.String {
bla, err := strconv.ParseFloat(valb.String(), 64)
if err != nil {
return false
}
return constant.Compare(constant.MakeFloat64(vala.Float()), tok, constant.MakeFloat64(bla))
}
case ak == reflect.String:
if bk == reflect.String {
return constant.Compare(constant.MakeString(vala.String()), tok, constant.MakeString(valb.String()))
}
}
ta, tb := reflect.TypeOf(a), reflect.TypeOf(b)
if ta != nil && tb != nil && ta.String() == "time.Time" && tb.String() == "time.Time" {
var x, y int64
x = 1
if vala.MethodByName("Equal").Call([]reflect.Value{valb})[0].Bool() {
y = 1
} else if vala.MethodByName("Before").Call([]reflect.Value{valb})[0].Bool() {
y = 2
}
return constant.Compare(constant.MakeInt64(x), tok, constant.MakeInt64(y))
}
if tok == token.EQL {
return reflect.DeepEqual(a, b)
}
return false
}

vendor/github.com/asdine/storm/q/fieldmatcher.go generated vendored Normal file
@@ -0,0 +1,39 @@
package q
import (
"errors"
"reflect"
)
// ErrUnknownField is returned when an unknown field is passed.
var ErrUnknownField = errors.New("unknown field")
type fieldMatcherDelegate struct {
FieldMatcher
Field string
}
// NewFieldMatcher creates a Matcher for a given field.
func NewFieldMatcher(field string, fm FieldMatcher) Matcher {
return fieldMatcherDelegate{Field: field, FieldMatcher: fm}
}
// FieldMatcher can be used in NewFieldMatcher as a simple way to create the
// most common Matcher: A Matcher that evaluates one field's value.
// For more complex scenarios, implement the Matcher interface directly.
type FieldMatcher interface {
MatchField(v interface{}) (bool, error)
}
func (r fieldMatcherDelegate) Match(i interface{}) (bool, error) {
v := reflect.Indirect(reflect.ValueOf(i))
return r.MatchValue(&v)
}
func (r fieldMatcherDelegate) MatchValue(v *reflect.Value) (bool, error) {
field := v.FieldByName(r.Field)
if !field.IsValid() {
return false, ErrUnknownField
}
return r.MatchField(field.Interface())
}
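A hedged sketch of a custom matcher built with NewFieldMatcher; the hasSuffix type is illustrative:

package main

import (
	"fmt"
	"strings"

	"github.com/asdine/storm/q"
)

// hasSuffix matches string fields that end with the configured suffix.
type hasSuffix struct{ suffix string }

func (h *hasSuffix) MatchField(v interface{}) (bool, error) {
	s, ok := v.(string)
	if !ok {
		return false, fmt.Errorf("hasSuffix: want string, got %T", v)
	}
	return strings.HasSuffix(s, h.suffix), nil
}

type User struct{ Email string }

func main() {
	m := q.NewFieldMatcher("Email", &hasSuffix{suffix: "@example.com"})
	ok, err := m.Match(&User{Email: "ada@example.com"})
	fmt.Println(ok, err) // true <nil>
}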

vendor/github.com/asdine/storm/q/regexp.go generated vendored Normal file
@@ -0,0 +1,51 @@
package q
import (
"fmt"
"regexp"
"sync"
)
// Re creates a regexp matcher. It checks if the given field matches the given regexp.
// Note that this only supports fields of type string or []byte.
func Re(field string, re string) Matcher {
regexpCache.RLock()
if r, ok := regexpCache.m[re]; ok {
regexpCache.RUnlock()
return NewFieldMatcher(field, &regexpMatcher{r: r})
}
regexpCache.RUnlock()
regexpCache.Lock()
r, err := regexp.Compile(re)
if err == nil {
regexpCache.m[re] = r
}
regexpCache.Unlock()
return NewFieldMatcher(field, &regexpMatcher{r: r, err: err})
}
var regexpCache = struct {
sync.RWMutex
m map[string]*regexp.Regexp
}{m: make(map[string]*regexp.Regexp)}
type regexpMatcher struct {
r *regexp.Regexp
err error
}
func (r *regexpMatcher) MatchField(v interface{}) (bool, error) {
if r.err != nil {
return false, r.err
}
switch fieldValue := v.(type) {
case string:
return r.r.MatchString(fieldValue), nil
case []byte:
return r.r.Match(fieldValue), nil
default:
return false, fmt.Errorf("Only string and []byte supported for regexp matcher, got %T", fieldValue)
}
}

vendor/github.com/asdine/storm/q/tree.go generated vendored Normal file
@@ -0,0 +1,222 @@
// Package q contains a list of Matchers used to compare struct fields with values
package q
import (
"go/token"
"reflect"
)
// A Matcher is used to test against a record to see if it matches.
type Matcher interface {
// Match is used to test the criteria against a structure.
Match(interface{}) (bool, error)
}
// A ValueMatcher is used to test against a reflect.Value.
type ValueMatcher interface {
// MatchValue tests if the given reflect.Value matches.
// It is useful when the reflect.Value of an object already exists.
MatchValue(*reflect.Value) (bool, error)
}
type cmp struct {
value interface{}
token token.Token
}
func (c *cmp) MatchField(v interface{}) (bool, error) {
return compare(v, c.value, c.token), nil
}
type trueMatcher struct{}
func (*trueMatcher) Match(i interface{}) (bool, error) {
return true, nil
}
func (*trueMatcher) MatchValue(v *reflect.Value) (bool, error) {
return true, nil
}
type or struct {
children []Matcher
}
func (c *or) Match(i interface{}) (bool, error) {
v := reflect.Indirect(reflect.ValueOf(i))
return c.MatchValue(&v)
}
func (c *or) MatchValue(v *reflect.Value) (bool, error) {
for _, matcher := range c.children {
if vm, ok := matcher.(ValueMatcher); ok {
ok, err := vm.MatchValue(v)
if err != nil {
return false, err
}
if ok {
return true, nil
}
continue
}
ok, err := matcher.Match(v.Interface())
if err != nil {
return false, err
}
if ok {
return true, nil
}
}
return false, nil
}
type and struct {
children []Matcher
}
func (c *and) Match(i interface{}) (bool, error) {
v := reflect.Indirect(reflect.ValueOf(i))
return c.MatchValue(&v)
}
func (c *and) MatchValue(v *reflect.Value) (bool, error) {
for _, matcher := range c.children {
if vm, ok := matcher.(ValueMatcher); ok {
ok, err := vm.MatchValue(v)
if err != nil {
return false, err
}
if !ok {
return false, nil
}
continue
}
ok, err := matcher.Match(v.Interface())
if err != nil {
return false, err
}
if !ok {
return false, nil
}
}
return true, nil
}
type strictEq struct {
field string
value interface{}
}
func (s *strictEq) MatchField(v interface{}) (bool, error) {
return reflect.DeepEqual(v, s.value), nil
}
type in struct {
list interface{}
}
func (i *in) MatchField(v interface{}) (bool, error) {
ref := reflect.ValueOf(i.list)
if ref.Kind() != reflect.Slice {
return false, nil
}
c := cmp{
token: token.EQL,
}
for i := 0; i < ref.Len(); i++ {
c.value = ref.Index(i).Interface()
ok, err := c.MatchField(v)
if err != nil {
return false, err
}
if ok {
return true, nil
}
}
return false, nil
}
type not struct {
children []Matcher
}
func (n *not) Match(i interface{}) (bool, error) {
v := reflect.Indirect(reflect.ValueOf(i))
return n.MatchValue(&v)
}
func (n *not) MatchValue(v *reflect.Value) (bool, error) {
var err error
for _, matcher := range n.children {
vm, ok := matcher.(ValueMatcher)
if ok {
ok, err = vm.MatchValue(v)
} else {
ok, err = matcher.Match(v.Interface())
}
if err != nil {
return false, err
}
if ok {
return false, nil
}
}
return true, nil
}
// Eq matcher, checks if the given field is equal to the given value
func Eq(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.EQL})
}
// StrictEq matcher, checks if the given field is deeply equal to the given value
func StrictEq(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &strictEq{value: v})
}
// Gt matcher, checks if the given field is greater than the given value
func Gt(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.GTR})
}
// Gte matcher, checks if the given field is greater than or equal to the given value
func Gte(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.GEQ})
}
// Lt matcher, checks if the given field is less than the given value
func Lt(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.LSS})
}
// Lte matcher, checks if the given field is less than or equal to the given value
func Lte(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &cmp{value: v, token: token.LEQ})
}
// In matcher, checks if the given field matches one of the values of the given slice.
// v must be a slice.
func In(field string, v interface{}) Matcher {
return NewFieldMatcher(field, &in{list: v})
}
// True matcher, always returns true
func True() Matcher { return &trueMatcher{} }
// Or matcher, checks if at least one of the given matchers matches the record
func Or(matchers ...Matcher) Matcher { return &or{children: matchers} }
// And matcher, checks if all of the given matchers matches the record
func And(matchers ...Matcher) Matcher { return &and{children: matchers} }
// Not matcher, checks if all of the given matchers return false
func Not(matchers ...Matcher) Matcher { return &not{children: matchers} }
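Matchers can be exercised directly against a struct, without a database; a minimal sketch combining the constructors above:

package main

import (
	"fmt"

	"github.com/asdine/storm/q"
)

type User struct {
	Name string
	Age  int
}

func main() {
	// adults named ada or grace
	m := q.And(
		q.Gte("Age", 18),
		q.Or(q.Eq("Name", "ada"), q.Eq("Name", "grace")),
	)
	ok, err := m.Match(&User{Name: "ada", Age: 30})
	fmt.Println(ok, err) // true <nil>
}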

vendor/github.com/asdine/storm/query.go generated vendored Normal file
@@ -0,0 +1,231 @@
package storm
import (
"github.com/asdine/storm/internal"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
)
// Select a list of records that match a list of matchers. Doesn't use indexes.
func (n *node) Select(matchers ...q.Matcher) Query {
tree := q.And(matchers...)
return newQuery(n, tree)
}
// Query is the low level query engine used by Storm. It allows searches to be run across an entire bucket.
type Query interface {
// Skip matching records by the given number
Skip(int) Query
// Limit the results by the given number
Limit(int) Query
// Order by the given field.
OrderBy(string) Query
// Reverse the order of the results
Reverse() Query
// Bucket specifies the bucket name
Bucket(string) Query
// Find a list of matching records
Find(interface{}) error
// First gets the first matching record
First(interface{}) error
// Delete all matching records
Delete(interface{}) error
// Count all the matching records
Count(interface{}) (int, error)
// Raw returns all the records without decoding them
Raw() ([][]byte, error)
// RawEach executes the given function for each raw element
RawEach(func([]byte, []byte) error) error
// Each executes the given function for each element
Each(interface{}, func(interface{}) error) error
}
func newQuery(n *node, tree q.Matcher) *query {
return &query{
skip: 0,
limit: -1,
node: n,
tree: tree,
sorter: newSorter(n),
}
}
type query struct {
limit int
skip int
reverse bool
tree q.Matcher
node *node
bucket string
sorter *sorter
}
func (q *query) Skip(nb int) Query {
q.skip = nb
return q
}
func (q *query) Limit(nb int) Query {
q.limit = nb
return q
}
func (q *query) OrderBy(field string) Query {
q.sorter.orderBy = field
return q
}
func (q *query) Reverse() Query {
q.reverse = true
q.sorter.reverse = true
return q
}
func (q *query) Bucket(bucketName string) Query {
q.bucket = bucketName
return q
}
func (q *query) Find(to interface{}) error {
sink, err := newListSink(q.node, to)
if err != nil {
return err
}
sink.limit = q.limit
sink.skip = q.skip
return q.runQuery(sink)
}
func (q *query) First(to interface{}) error {
sink, err := newFirstSink(q.node, to)
if err != nil {
return err
}
sink.skip = q.skip
return q.runQuery(sink)
}
func (q *query) Delete(kind interface{}) error {
sink, err := newDeleteSink(q.node, kind)
if err != nil {
return err
}
sink.limit = q.limit
sink.skip = q.skip
return q.runQuery(sink)
}
func (q *query) Count(kind interface{}) (int, error) {
sink, err := newCountSink(q.node, kind)
if err != nil {
return 0, err
}
sink.limit = q.limit
sink.skip = q.skip
err = q.runQuery(sink)
if err != nil {
return 0, err
}
return sink.counter, nil
}
func (q *query) Raw() ([][]byte, error) {
sink := newRawSink()
sink.limit = q.limit
sink.skip = q.skip
err := q.runQuery(sink)
if err != nil {
return nil, err
}
return sink.results, nil
}
func (q *query) RawEach(fn func([]byte, []byte) error) error {
sink := newRawSink()
sink.limit = q.limit
sink.skip = q.skip
sink.execFn = fn
return q.runQuery(sink)
}
func (q *query) Each(kind interface{}, fn func(interface{}) error) error {
sink, err := newEachSink(kind)
if err != nil {
return err
}
sink.limit = q.limit
sink.skip = q.skip
sink.execFn = fn
return q.runQuery(sink)
}
func (q *query) runQuery(sink sink) error {
if q.node.tx != nil {
return q.query(q.node.tx, sink)
}
if sink.readOnly() {
return q.node.s.Bolt.View(func(tx *bolt.Tx) error {
return q.query(tx, sink)
})
}
return q.node.s.Bolt.Update(func(tx *bolt.Tx) error {
return q.query(tx, sink)
})
}
func (q *query) query(tx *bolt.Tx, sink sink) error {
bucketName := q.bucket
if bucketName == "" {
bucketName = sink.bucketName()
}
bucket := q.node.GetBucket(tx, bucketName)
if q.limit == 0 {
return q.sorter.flush(sink)
}
if bucket != nil {
c := internal.Cursor{C: bucket.Cursor(), Reverse: q.reverse}
for k, v := c.First(); k != nil; k, v = c.Next() {
if v == nil {
continue
}
stop, err := q.sorter.filter(sink, q.tree, bucket, k, v)
if err != nil {
return err
}
if stop {
break
}
}
}
return q.sorter.flush(sink)
}
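A hedged sketch of the chainable query surface; storm.Open and DB.Save come from parts of the package outside this diff:

package main

import (
	"fmt"

	"github.com/asdine/storm"
	"github.com/asdine/storm/q"
)

type User struct {
	ID   int `storm:"id,increment"`
	Name string
	Age  int
}

func main() {
	db, err := storm.Open("query.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	for _, u := range []User{{Name: "ada", Age: 36}, {Name: "alan", Age: 41}} {
		u := u
		if err := db.Save(&u); err != nil {
			panic(err)
		}
	}

	var older []User
	err = db.Select(q.Gte("Age", 40)).OrderBy("Age").Reverse().Limit(10).Find(&older)
	if err != nil && err != storm.ErrNotFound {
		panic(err) // Find returns storm.ErrNotFound when nothing matches
	}
	fmt.Println(len(older)) // 1
}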

vendor/github.com/asdine/storm/scan.go generated vendored Normal file
@@ -0,0 +1,99 @@
package storm
import (
"bytes"
"github.com/boltdb/bolt"
)
// A BucketScanner scans a Node for a list of buckets
type BucketScanner interface {
// PrefixScan scans the root buckets for keys matching the given prefix.
PrefixScan(prefix string) []Node
// RangeScan scans the buckets in this node within the given range.
RangeScan(min, max string) []Node
}
// PrefixScan scans the buckets in this node for keys matching the given prefix.
func (n *node) PrefixScan(prefix string) []Node {
if n.tx != nil {
return n.prefixScan(n.tx, prefix)
}
var nodes []Node
n.readTx(func(tx *bolt.Tx) error {
nodes = n.prefixScan(tx, prefix)
return nil
})
return nodes
}
func (n *node) prefixScan(tx *bolt.Tx, prefix string) []Node {
var (
prefixBytes = []byte(prefix)
nodes []Node
c = n.cursor(tx)
)
for k, v := c.Seek(prefixBytes); k != nil && bytes.HasPrefix(k, prefixBytes); k, v = c.Next() {
if v != nil {
continue
}
nodes = append(nodes, n.From(string(k)))
}
return nodes
}
// RangeScan scans the buckets in this node over a range such as a sortable time range.
func (n *node) RangeScan(min, max string) []Node {
if n.tx != nil {
return n.rangeScan(n.tx, min, max)
}
var nodes []Node
n.readTx(func(tx *bolt.Tx) error {
nodes = n.rangeScan(tx, min, max)
return nil
})
return nodes
}
func (n *node) rangeScan(tx *bolt.Tx, min, max string) []Node {
var (
minBytes = []byte(min)
maxBytes = []byte(max)
nodes []Node
c = n.cursor(tx)
)
for k, v := c.Seek(minBytes); k != nil && bytes.Compare(k, maxBytes) <= 0; k, v = c.Next() {
if v != nil {
continue
}
nodes = append(nodes, n.From(string(k)))
}
return nodes
}
func (n *node) cursor(tx *bolt.Tx) *bolt.Cursor {
var c *bolt.Cursor
if len(n.rootBucket) > 0 {
c = n.GetBucket(tx).Cursor()
} else {
c = tx.Cursor()
}
return c
}
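A hedged sketch of bucket scanning; the date-named root buckets are created through the key/value API from kv.go above:

package main

import (
	"fmt"

	"github.com/asdine/storm"
)

func main() {
	db, err := storm.Open("scan.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Create a few date-named root buckets.
	for _, day := range []string{"2017-04-26", "2017-04-27", "2017-04-28"} {
		if err := db.From(day).Set("hits", "total", 1); err != nil {
			panic(err)
		}
	}

	fmt.Println(len(db.PrefixScan("2017-04")))                 // 3
	fmt.Println(len(db.RangeScan("2017-04-27", "2017-04-28"))) // 2
}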

vendor/github.com/asdine/storm/sink.go generated vendored Normal file
@@ -0,0 +1,515 @@
package storm
import (
"encoding/binary"
"reflect"
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
rbt "github.com/emirpasic/gods/trees/redblacktree"
)
type item struct {
value *reflect.Value
bucket *bolt.Bucket
k []byte
v []byte
}
func newSorter(node Node) *sorter {
return &sorter{
node: node,
rbTree: rbt.NewWithStringComparator(),
}
}
type sorter struct {
node Node
rbTree *rbt.Tree
orderBy string
reverse bool
counter int64
}
func (s *sorter) filter(snk sink, tree q.Matcher, bucket *bolt.Bucket, k, v []byte) (bool, error) {
s.counter++
rsnk, ok := snk.(reflectSink)
if !ok {
return snk.add(&item{
bucket: bucket,
k: k,
v: v,
})
}
newElem := rsnk.elem()
err := s.node.Codec().Unmarshal(v, newElem.Interface())
if err != nil {
return false, err
}
ok = tree == nil
if !ok {
ok, err = tree.Match(newElem.Interface())
if err != nil {
return false, err
}
}
if ok {
it := item{
bucket: bucket,
value: &newElem,
k: k,
v: v,
}
if s.orderBy != "" {
elm := reflect.Indirect(newElem).FieldByName(s.orderBy)
if !elm.IsValid() {
return false, ErrNotFound
}
raw, err := toBytes(elm.Interface(), s.node.Codec())
if err != nil {
return false, err
}
key := make([]byte, len(raw)+8)
for i := 0; i < len(raw); i++ {
key[i] = raw[i]
}
binary.PutVarint(key[len(raw):], s.counter)
s.rbTree.Put(string(key), &it)
return false, nil
}
return snk.add(&it)
}
return false, nil
}
func (s *sorter) flush(snk sink) error {
if s.orderBy == "" {
return snk.flush()
}
s.orderBy = ""
var err error
var stop bool
it := s.rbTree.Iterator()
if s.reverse {
it.End()
} else {
it.Begin()
}
for (s.reverse && it.Prev()) || (!s.reverse && it.Next()) {
item := it.Value().(*item)
stop, err = snk.add(item)
if err != nil {
return err
}
if stop {
break
}
}
return snk.flush()
}
type sink interface {
bucketName() string
flush() error
add(*item) (bool, error)
readOnly() bool
}
type reflectSink interface {
elem() reflect.Value
}
func newListSink(node Node, to interface{}) (*listSink, error) {
ref := reflect.ValueOf(to)
if ref.Kind() != reflect.Ptr || reflect.Indirect(ref).Kind() != reflect.Slice {
return nil, ErrSlicePtrNeeded
}
sliceType := reflect.Indirect(ref).Type()
elemType := sliceType.Elem()
if elemType.Kind() == reflect.Ptr {
elemType = elemType.Elem()
}
if elemType.Name() == "" {
return nil, ErrNoName
}
return &listSink{
node: node,
ref: ref,
isPtr: sliceType.Elem().Kind() == reflect.Ptr,
elemType: elemType,
name: elemType.Name(),
limit: -1,
}, nil
}
type listSink struct {
node Node
ref reflect.Value
results reflect.Value
elemType reflect.Type
name string
isPtr bool
skip int
limit int
idx int
}
func (l *listSink) elem() reflect.Value {
if l.results.IsValid() && l.idx < l.results.Len() {
return l.results.Index(l.idx).Addr()
}
return reflect.New(l.elemType)
}
func (l *listSink) bucketName() string {
return l.name
}
func (l *listSink) add(i *item) (bool, error) {
if l.limit == 0 {
return true, nil
}
if l.skip > 0 {
l.skip--
return false, nil
}
if !l.results.IsValid() {
l.results = reflect.MakeSlice(reflect.Indirect(l.ref).Type(), 0, 0)
}
if l.limit > 0 {
l.limit--
}
if l.idx == l.results.Len() {
if l.isPtr {
l.results = reflect.Append(l.results, *i.value)
} else {
l.results = reflect.Append(l.results, reflect.Indirect(*i.value))
}
}
l.idx++
return l.limit == 0, nil
}
func (l *listSink) flush() error {
if l.results.IsValid() && l.results.Len() > 0 {
reflect.Indirect(l.ref).Set(l.results)
return nil
}
return ErrNotFound
}
func (l *listSink) readOnly() bool {
return true
}
func newFirstSink(node Node, to interface{}) (*firstSink, error) {
ref := reflect.ValueOf(to)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return nil, ErrStructPtrNeeded
}
return &firstSink{
node: node,
ref: ref,
}, nil
}
type firstSink struct {
node Node
ref reflect.Value
skip int
found bool
}
func (f *firstSink) elem() reflect.Value {
return reflect.New(reflect.Indirect(f.ref).Type())
}
func (f *firstSink) bucketName() string {
return reflect.Indirect(f.ref).Type().Name()
}
func (f *firstSink) add(i *item) (bool, error) {
if f.skip > 0 {
f.skip--
return false, nil
}
reflect.Indirect(f.ref).Set(i.value.Elem())
f.found = true
return true, nil
}
func (f *firstSink) flush() error {
if !f.found {
return ErrNotFound
}
return nil
}
func (f *firstSink) readOnly() bool {
return true
}
func newDeleteSink(node Node, kind interface{}) (*deleteSink, error) {
ref := reflect.ValueOf(kind)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return nil, ErrStructPtrNeeded
}
return &deleteSink{
node: node,
ref: ref,
}, nil
}
type deleteSink struct {
node Node
ref reflect.Value
skip int
limit int
removed int
}
func (d *deleteSink) elem() reflect.Value {
return reflect.New(reflect.Indirect(d.ref).Type())
}
func (d *deleteSink) bucketName() string {
return reflect.Indirect(d.ref).Type().Name()
}
func (d *deleteSink) add(i *item) (bool, error) {
if d.skip > 0 {
d.skip--
return false, nil
}
if d.limit > 0 {
d.limit--
}
info, err := extract(&d.ref)
if err != nil {
return false, err
}
for fieldName, fieldCfg := range info.Fields {
if fieldCfg.Index == "" {
continue
}
idx, err := getIndex(i.bucket, fieldCfg.Index, fieldName)
if err != nil {
return false, err
}
err = idx.RemoveID(i.k)
if err != nil {
if err == index.ErrNotFound {
return false, ErrNotFound
}
return false, err
}
}
d.removed++
return d.limit == 0, i.bucket.Delete(i.k)
}
func (d *deleteSink) flush() error {
if d.removed == 0 {
return ErrNotFound
}
return nil
}
func (d *deleteSink) readOnly() bool {
return false
}
func newCountSink(node Node, kind interface{}) (*countSink, error) {
ref := reflect.ValueOf(kind)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return nil, ErrStructPtrNeeded
}
return &countSink{
node: node,
ref: ref,
}, nil
}
type countSink struct {
node Node
ref reflect.Value
skip int
limit int
counter int
}
func (c *countSink) elem() reflect.Value {
return reflect.New(reflect.Indirect(c.ref).Type())
}
func (c *countSink) bucketName() string {
return reflect.Indirect(c.ref).Type().Name()
}
func (c *countSink) add(i *item) (bool, error) {
if c.skip > 0 {
c.skip--
return false, nil
}
if c.limit > 0 {
c.limit--
}
c.counter++
return c.limit == 0, nil
}
func (c *countSink) flush() error {
return nil
}
func (c *countSink) readOnly() bool {
return true
}
func newRawSink() *rawSink {
return &rawSink{
limit: -1,
}
}
type rawSink struct {
results [][]byte
skip int
limit int
execFn func([]byte, []byte) error
}
func (r *rawSink) add(i *item) (bool, error) {
if r.limit == 0 {
return true, nil
}
if r.skip > 0 {
r.skip--
return false, nil
}
if r.limit > 0 {
r.limit--
}
if r.execFn != nil {
err := r.execFn(i.k, i.v)
if err != nil {
return false, err
}
} else {
r.results = append(r.results, i.v)
}
return r.limit == 0, nil
}
func (r *rawSink) bucketName() string {
return ""
}
func (r *rawSink) flush() error {
return nil
}
func (r *rawSink) readOnly() bool {
return true
}
func newEachSink(to interface{}) (*eachSink, error) {
ref := reflect.ValueOf(to)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return nil, ErrStructPtrNeeded
}
return &eachSink{
ref: ref,
}, nil
}
type eachSink struct {
skip int
limit int
ref reflect.Value
execFn func(interface{}) error
}
func (e *eachSink) elem() reflect.Value {
return reflect.New(reflect.Indirect(e.ref).Type())
}
func (e *eachSink) bucketName() string {
return reflect.Indirect(e.ref).Type().Name()
}
func (e *eachSink) add(i *item) (bool, error) {
if e.limit == 0 {
return true, nil
}
if e.skip > 0 {
e.skip--
return false, nil
}
if e.limit > 0 {
e.limit--
}
err := e.execFn(i.value.Interface())
if err != nil {
return false, err
}
return e.limit == 0, nil
}
func (e *eachSink) flush() error {
return nil
}
func (e *eachSink) readOnly() bool {
return true
}
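// A minimal sketch of the composite sort key built in filter above; the
// orderByValue and counter names are stand-ins for the values used there.
// The encoded order-by field is suffixed with a varint insertion counter so
// that records with equal field values keep a stable order in the red-black
// tree.
//
//	raw, _ := toBytes(orderByValue, node.Codec())
//	key := make([]byte, len(raw)+8)
//	copy(key, raw)
//	binary.PutVarint(key[len(raw):], counter)
//	tree.Put(string(key), &it)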

vendor/github.com/asdine/storm/store.go generated vendored Normal file

@@ -0,0 +1,431 @@
package storm
import (
"bytes"
"reflect"
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
)
// TypeStore stores user-defined types in BoltDB
type TypeStore interface {
Finder
// Init creates the indexes and buckets for a given structure
Init(data interface{}) error
// ReIndex rebuilds all the indexes of a bucket
ReIndex(data interface{}) error
// Save a structure
Save(data interface{}) error
// Update a structure
Update(data interface{}) error
// UpdateField updates a single field
UpdateField(data interface{}, fieldName string, value interface{}) error
// Drop a bucket
Drop(data interface{}) error
// DeleteStruct deletes a structure from the associated bucket
DeleteStruct(data interface{}) error
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
Remove(data interface{}) error
}
// Init creates the indexes and buckets for a given structure
func (n *node) Init(data interface{}) error {
v := reflect.ValueOf(data)
cfg, err := extract(&v)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.init(tx, cfg)
})
}
func (n *node) init(tx *bolt.Tx, cfg *structConfig) error {
bucket, err := n.CreateBucketIfNotExists(tx, cfg.Name)
if err != nil {
return err
}
// save node configuration in the bucket
_, err = newMeta(bucket, n)
if err != nil {
return err
}
for fieldName, fieldCfg := range cfg.Fields {
if fieldCfg.Index == "" {
continue
}
switch fieldCfg.Index {
case tagUniqueIdx:
_, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
case tagIdx:
_, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
default:
err = ErrIdxNotFound
}
if err != nil {
return err
}
}
return nil
}
func (n *node) ReIndex(data interface{}) error {
ref := reflect.ValueOf(data)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return ErrStructPtrNeeded
}
cfg, err := extract(&ref)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.reIndex(tx, data, cfg)
})
}
func (n *node) reIndex(tx *bolt.Tx, data interface{}, cfg *structConfig) error {
root := n.WithTransaction(tx)
nodes := root.From(cfg.Name).PrefixScan(indexPrefix)
bucket := root.GetBucket(tx, cfg.Name)
if bucket == nil {
return ErrNotFound
}
for _, node := range nodes {
buckets := node.Bucket()
name := buckets[len(buckets)-1]
err := bucket.DeleteBucket([]byte(name))
if err != nil {
return err
}
}
total, err := root.Count(data)
if err != nil {
return err
}
for i := 0; i < total; i++ {
err = root.Select(q.True()).Skip(i).First(data)
if err != nil {
return err
}
err = root.Update(data)
if err != nil {
return err
}
}
return nil
}
// Save a structure
func (n *node) Save(data interface{}) error {
ref := reflect.ValueOf(data)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return ErrStructPtrNeeded
}
cfg, err := extract(&ref)
if err != nil {
return err
}
if cfg.ID.IsZero {
if !cfg.ID.IsInteger || (!n.s.autoIncrement && !cfg.ID.Increment) {
return ErrZeroID
}
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.save(tx, cfg, data, true)
})
}
func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, edit bool) error {
bucket, err := n.CreateBucketIfNotExists(tx, cfg.Name)
if err != nil {
return err
}
// save node configuration in the bucket
meta, err := newMeta(bucket, n)
if err != nil {
return err
}
if cfg.ID.IsZero {
err = meta.increment(cfg.ID)
if err != nil {
return err
}
}
id, err := toBytes(cfg.ID.Value.Interface(), n.s.codec)
if err != nil {
return err
}
for fieldName, fieldCfg := range cfg.Fields {
if edit && !fieldCfg.IsID && fieldCfg.Increment && fieldCfg.IsInteger && fieldCfg.IsZero {
err = meta.increment(fieldCfg)
if err != nil {
return err
}
}
if fieldCfg.Index == "" {
continue
}
idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
if err != nil {
return err
}
if fieldCfg.IsZero {
err = idx.RemoveID(id)
if err != nil {
return err
}
continue
}
value, err := toBytes(fieldCfg.Value.Interface(), n.s.codec)
if err != nil {
return err
}
var found bool
idsSaved, err := idx.All(value, nil)
if err != nil {
return err
}
for _, idSaved := range idsSaved {
if bytes.Equal(idSaved, id) {
found = true
break
}
}
if found {
continue
}
err = idx.RemoveID(id)
if err != nil {
return err
}
err = idx.Add(value, id)
if err != nil {
if err == index.ErrAlreadyExists {
return ErrAlreadyExists
}
return err
}
}
raw, err := n.s.codec.Marshal(data)
if err != nil {
return err
}
return bucket.Put(id, raw)
}
// Update a structure
func (n *node) Update(data interface{}) error {
return n.update(data, func(ref *reflect.Value, current *reflect.Value, cfg *structConfig) error {
numfield := ref.NumField()
for i := 0; i < numfield; i++ {
f := ref.Field(i)
if ref.Type().Field(i).PkgPath != "" {
continue
}
zero := reflect.Zero(f.Type()).Interface()
actual := f.Interface()
if !reflect.DeepEqual(actual, zero) {
cf := current.Field(i)
cf.Set(f)
idxInfo, ok := cfg.Fields[ref.Type().Field(i).Name]
if ok {
idxInfo.Value = &cf
}
}
}
return nil
})
}
// UpdateField updates a single field
func (n *node) UpdateField(data interface{}, fieldName string, value interface{}) error {
return n.update(data, func(ref *reflect.Value, current *reflect.Value, cfg *structConfig) error {
f := current.FieldByName(fieldName)
if !f.IsValid() {
return ErrNotFound
}
tf, _ := current.Type().FieldByName(fieldName)
if tf.PkgPath != "" {
return ErrNotFound
}
v := reflect.ValueOf(value)
if v.Kind() != f.Kind() {
return ErrIncompatibleValue
}
f.Set(v)
idxInfo, ok := cfg.Fields[fieldName]
if ok {
idxInfo.Value = &f
idxInfo.IsZero = isZero(idxInfo.Value)
}
return nil
})
}
func (n *node) update(data interface{}, fn func(*reflect.Value, *reflect.Value, *structConfig) error) error {
ref := reflect.ValueOf(data)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return ErrStructPtrNeeded
}
cfg, err := extract(&ref)
if err != nil {
return err
}
if cfg.ID.IsZero {
return ErrNoID
}
current := reflect.New(reflect.Indirect(ref).Type())
return n.readWriteTx(func(tx *bolt.Tx) error {
err = n.WithTransaction(tx).One(cfg.ID.Name, cfg.ID.Value.Interface(), current.Interface())
if err != nil {
return err
}
ref = ref.Elem()
cref := current.Elem()
err = fn(&ref, &cref, cfg)
if err != nil {
return err
}
return n.save(tx, cfg, current.Interface(), false)
})
}
// Drop a bucket
func (n *node) Drop(data interface{}) error {
var bucketName string
v := reflect.ValueOf(data)
if v.Kind() != reflect.String {
info, err := extract(&v)
if err != nil {
return err
}
bucketName = info.Name
} else {
bucketName = v.Interface().(string)
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.drop(tx, bucketName)
})
}
func (n *node) drop(tx *bolt.Tx, bucketName string) error {
bucket := n.GetBucket(tx)
if bucket == nil {
return tx.DeleteBucket([]byte(bucketName))
}
return bucket.DeleteBucket([]byte(bucketName))
}
// DeleteStruct deletes a structure from the associated bucket
func (n *node) DeleteStruct(data interface{}) error {
ref := reflect.ValueOf(data)
if !ref.IsValid() || ref.Kind() != reflect.Ptr || ref.Elem().Kind() != reflect.Struct {
return ErrStructPtrNeeded
}
cfg, err := extract(&ref)
if err != nil {
return err
}
id, err := toBytes(cfg.ID.Value.Interface(), n.s.codec)
if err != nil {
return err
}
return n.readWriteTx(func(tx *bolt.Tx) error {
return n.deleteStruct(tx, cfg, id)
})
}
func (n *node) deleteStruct(tx *bolt.Tx, cfg *structConfig, id []byte) error {
bucket := n.GetBucket(tx, cfg.Name)
if bucket == nil {
return ErrNotFound
}
for fieldName, fieldCfg := range cfg.Fields {
if fieldCfg.Index == "" {
continue
}
idx, err := getIndex(bucket, fieldCfg.Index, fieldName)
if err != nil {
return err
}
err = idx.RemoveID(id)
if err != nil {
if err == index.ErrNotFound {
return ErrNotFound
}
return err
}
}
raw := bucket.Get(id)
if raw == nil {
return ErrNotFound
}
return bucket.Delete(id)
}
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
func (n *node) Remove(data interface{}) error {
return n.DeleteStruct(data)
}
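// A minimal usage sketch of the TypeStore API above; the User type and its
// tags are hypothetical.
//
//	type User struct {
//		ID    int    `storm:"id,increment"`
//		Email string `storm:"unique"`
//		Group string `storm:"index"`
//	}
//
//	u := User{Email: "alice@example.com", Group: "staff"}
//	err := db.Save(&u) // inserts and fills in u.ID
//	err = db.UpdateField(&User{ID: u.ID}, "Group", "admin")
//	err = db.DeleteStruct(&User{ID: u.ID})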

vendor/github.com/asdine/storm/storm.go generated vendored Normal file

@@ -0,0 +1,330 @@
package storm
import (
"bytes"
"encoding/binary"
"os"
"time"
"github.com/asdine/storm/codec"
"github.com/asdine/storm/codec/json"
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
)
const (
dbinfo = "__storm_db"
metadataBucket = "__storm_metadata"
)
// defaultCodec is the codec used when none is specified (JSON).
var defaultCodec = json.Codec
// Open opens a database at the given path with optional Storm options.
func Open(path string, stormOptions ...func(*DB) error) (*DB, error) {
var err error
s := &DB{
Path: path,
codec: defaultCodec,
}
for _, option := range stormOptions {
if err = option(s); err != nil {
return nil, err
}
}
if s.boltMode == 0 {
s.boltMode = 0600
}
if s.boltOptions == nil {
s.boltOptions = &bolt.Options{Timeout: 1 * time.Second}
}
s.root = &node{s: s, rootBucket: s.rootBucket, codec: s.codec, batchMode: s.batchMode}
// skip if UseDB option is used
if s.Bolt == nil {
s.Bolt, err = bolt.Open(path, s.boltMode, s.boltOptions)
if err != nil {
return nil, err
}
err = s.checkVersion()
if err != nil {
return nil, err
}
}
return s, nil
}
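// A minimal usage sketch of Open (path hypothetical):
//
//	db, err := storm.Open("app.db")
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer db.Close()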
// DB is the wrapper around BoltDB. It contains an instance of BoltDB and uses it to perform all the
// needed operations
type DB struct {
// Path of the database file
Path string
// Handles encoding and decoding of objects
codec codec.MarshalUnmarshaler
// Bolt is still easily accessible
Bolt *bolt.DB
// Bolt file mode
boltMode os.FileMode
// Bolt options
boltOptions *bolt.Options
// Enable auto increment on empty integer fields
autoIncrement bool
// The root node that points to the root bucket.
root *node
// The root bucket name
rootBucket []string
// Enable batch mode for read-write transaction, instead of update mode
batchMode bool
}
// From returns a new Storm node with a new bucket root.
// All DB operations on the new node will be executed relative to the given
// bucket.
func (s *DB) From(root ...string) Node {
newNode := *s.root
newNode.rootBucket = root
return &newNode
}
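// A minimal sketch (bucket names hypothetical): every operation on the
// returned node is executed relative to the given bucket path.
//
//	users := db.From("users")
//	err := users.Save(&u)         // stored under the "users" bucket
//	backup := users.From("2017")  // nested path: users/2017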
// WithTransaction returns a new Storm node that will use the given transaction.
func (s *DB) WithTransaction(tx *bolt.Tx) Node {
return s.root.WithTransaction(tx)
}
// Bucket returns the root bucket name as a slice.
// In the normal, simple case this will be empty.
func (s *DB) Bucket() []string {
return s.root.Bucket()
}
// Close the database
func (s *DB) Close() error {
return s.Bolt.Close()
}
// Codec returns the EncodeDecoder used by this instance of Storm
func (s *DB) Codec() codec.MarshalUnmarshaler {
return s.codec
}
// WithCodec returns a new Storm node that will use the given codec.
func (s *DB) WithCodec(codec codec.MarshalUnmarshaler) Node {
n := s.From().(*node)
n.codec = codec
return n
}
// WithBatch returns a new Storm node with batch mode enabled.
func (s *DB) WithBatch(enabled bool) Node {
n := s.From().(*node)
n.batchMode = enabled
return n
}
// Get a value from a bucket
func (s *DB) Get(bucketName string, key interface{}, to interface{}) error {
return s.root.Get(bucketName, key, to)
}
// Set a key/value pair into a bucket
func (s *DB) Set(bucketName string, key interface{}, value interface{}) error {
return s.root.Set(bucketName, key, value)
}
// Delete deletes a key from a bucket
func (s *DB) Delete(bucketName string, key interface{}) error {
return s.root.Delete(bucketName, key)
}
// GetBytes gets a raw value from a bucket.
func (s *DB) GetBytes(bucketName string, key interface{}) ([]byte, error) {
return s.root.GetBytes(bucketName, key)
}
// SetBytes sets a raw value into a bucket.
func (s *DB) SetBytes(bucketName string, key interface{}, value []byte) error {
return s.root.SetBytes(bucketName, key, value)
}
// Save a structure
func (s *DB) Save(data interface{}) error {
return s.root.Save(data)
}
// PrefixScan scans the root buckets for keys matching the given prefix.
func (s *DB) PrefixScan(prefix string) []Node {
return s.root.PrefixScan(prefix)
}
// RangeScan scans the root buckets over a range such as a sortable time range.
func (s *DB) RangeScan(min, max string) []Node {
return s.root.RangeScan(min, max)
}
// Select a list of records that match a list of matchers. Doesn't use indexes.
func (s *DB) Select(matchers ...q.Matcher) Query {
return s.root.Select(matchers...)
}
// Range returns one or more records by the specified index within the specified range
func (s *DB) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
return s.root.Range(fieldName, min, max, to, options...)
}
// AllByIndex gets all the records of a bucket that are indexed in the specified index
func (s *DB) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
return s.root.AllByIndex(fieldName, to, options...)
}
// All gets all the records of a bucket
func (s *DB) All(to interface{}, options ...func(*index.Options)) error {
return s.root.All(to, options...)
}
// Count counts all the records of a bucket
func (s *DB) Count(data interface{}) (int, error) {
return s.root.Count(data)
}
// DeleteStruct deletes a structure from the associated bucket
func (s *DB) DeleteStruct(data interface{}) error {
return s.root.DeleteStruct(data)
}
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
func (s *DB) Remove(data interface{}) error {
return s.root.DeleteStruct(data)
}
// Drop a bucket
func (s *DB) Drop(data interface{}) error {
return s.root.Drop(data)
}
// Find returns one or more records by the specified index
func (s *DB) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
return s.root.Find(fieldName, value, to, options...)
}
// Init creates the indexes and buckets for a given structure
func (s *DB) Init(data interface{}) error {
return s.root.Init(data)
}
// ReIndex rebuilds all the indexes of a bucket
func (s *DB) ReIndex(data interface{}) error {
return s.root.ReIndex(data)
}
// One returns one record by the specified index
func (s *DB) One(fieldName string, value interface{}, to interface{}) error {
return s.root.One(fieldName, value, to)
}
// Begin starts a new transaction.
func (s *DB) Begin(writable bool) (Node, error) {
return s.root.Begin(writable)
}
// Rollback closes the transaction and ignores all previous updates.
func (s *DB) Rollback() error {
return s.root.Rollback()
}
// Commit writes all changes to disk.
func (s *DB) Commit() error {
return s.root.Commit()
}
// Update a structure
func (s *DB) Update(data interface{}) error {
return s.root.Update(data)
}
// UpdateField updates a single field
func (s *DB) UpdateField(data interface{}, fieldName string, value interface{}) error {
return s.root.UpdateField(data, fieldName, value)
}
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.
func (s *DB) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
return s.root.CreateBucketIfNotExists(tx, bucket)
}
// GetBucket returns the given bucket below the current node.
func (s *DB) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
return s.root.GetBucket(tx, children...)
}
func (s *DB) checkVersion() error {
var v string
err := s.Get(dbinfo, "version", &v)
if err != nil && err != ErrNotFound {
return err
}
// for now, we only set the current version if it doesn't exist or if it's an outdated one (0.5.0 or 0.6.0)
if v == "" || v == "0.5.0" || v == "0.6.0" {
return s.Set(dbinfo, "version", Version)
}
return nil
}
// toBytes turns an interface into a slice of bytes
func toBytes(key interface{}, codec codec.MarshalUnmarshaler) ([]byte, error) {
if key == nil {
return nil, nil
}
switch t := key.(type) {
case []byte:
return t, nil
case string:
return []byte(t), nil
case int:
return numbertob(int64(t))
case uint:
return numbertob(uint64(t))
case int8, int16, int32, int64, uint8, uint16, uint32, uint64:
return numbertob(t)
default:
return codec.Marshal(key)
}
}
func numbertob(v interface{}) ([]byte, error) {
var buf bytes.Buffer
err := binary.Write(&buf, binary.BigEndian, v)
if err != nil {
return nil, err
}
return buf.Bytes(), nil
}
func numberfromb(raw []byte) (int64, error) {
r := bytes.NewReader(raw)
var to int64
err := binary.Read(r, binary.BigEndian, &to)
if err != nil {
return 0, err
}
return to, nil
}
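// A small illustration of why integers are encoded big-endian: for
// non-negative values of the same width, bytes.Compare then agrees with
// numeric order, which range queries over index keys rely on.
//
//	a, _ := numbertob(int64(1))
//	b, _ := numbertob(int64(256))
//	fmt.Println(bytes.Compare(a, b)) // -1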

vendor/github.com/asdine/storm/transaction.go generated vendored Normal file

@@ -0,0 +1,52 @@
package storm
import "github.com/boltdb/bolt"
// Tx is a transaction
type Tx interface {
// Commit writes all changes to disk.
Commit() error
// Rollback closes the transaction and ignores all previous updates.
Rollback() error
}
// Begin starts a new transaction.
func (n node) Begin(writable bool) (Node, error) {
var err error
n.tx, err = n.s.Bolt.Begin(writable)
if err != nil {
return nil, err
}
return &n, nil
}
// Rollback closes the transaction and ignores all previous updates.
func (n *node) Rollback() error {
if n.tx == nil {
return ErrNotInTransaction
}
err := n.tx.Rollback()
if err == bolt.ErrTxClosed {
return ErrNotInTransaction
}
return err
}
// Commit writes all changes to disk.
func (n *node) Commit() error {
if n.tx == nil {
return ErrNotInTransaction
}
err := n.tx.Commit()
if err == bolt.ErrTxClosed {
return ErrNotInTransaction
}
return err
}
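// A minimal usage sketch of the transaction API above:
//
//	tx, err := db.Begin(true)
//	if err != nil {
//		return err
//	}
//	defer tx.Rollback() // harmless after a successful Commit
//
//	if err := tx.Save(&u); err != nil {
//		return err
//	}
//	return tx.Commit()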

vendor/github.com/asdine/storm/version.go generated vendored Normal file

@@ -0,0 +1,4 @@
package storm
// Version of Storm
const Version = "0.8.0"

vendor/github.com/boltdb/bolt/bolt_386.go generated vendored Normal file

@@ -0,0 +1,10 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_amd64.go generated vendored Normal file

@@ -0,0 +1,10 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_arm.go generated vendored Normal file

@@ -0,0 +1,28 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}

vendor/github.com/boltdb/bolt/bolt_arm64.go generated vendored Normal file

@@ -0,0 +1,12 @@
// +build arm64

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_linux.go generated vendored Normal file

@@ -0,0 +1,10 @@
package bolt
import (
"syscall"
)
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return syscall.Fdatasync(int(db.file.Fd()))
}

vendor/github.com/boltdb/bolt/bolt_openbsd.go generated vendored Normal file

@@ -0,0 +1,27 @@
package bolt
import (
"syscall"
"unsafe"
)
const (
msAsync = 1 << iota // perform asynchronous writes
msSync // perform synchronous writes
msInvalidate // invalidate cached data
)
func msync(db *DB) error {
_, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate)
if errno != 0 {
return errno
}
return nil
}
func fdatasync(db *DB) error {
if db.data != nil {
return msync(db)
}
return db.file.Sync()
}

vendor/github.com/boltdb/bolt/bolt_ppc.go generated vendored Normal file

@@ -0,0 +1,9 @@
// +build ppc

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

vendor/github.com/boltdb/bolt/bolt_ppc64.go generated vendored Normal file

@@ -0,0 +1,12 @@
// +build ppc64

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_ppc64le.go generated vendored Normal file

@@ -0,0 +1,12 @@
// +build ppc64le

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_s390x.go generated vendored Normal file

@@ -0,0 +1,12 @@
// +build s390x

package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/boltdb/bolt/bolt_unix.go generated vendored Normal file

@@ -0,0 +1,89 @@
// +build !windows,!plan9,!solaris

package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
)
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
flag := syscall.LOCK_SH
if exclusive {
flag = syscall.LOCK_EX
}
// Otherwise attempt to obtain an exclusive lock.
err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB)
if err == nil {
return nil
} else if err != syscall.EWOULDBLOCK {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN)
}
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Map the data file to memory.
b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
if err := madvise(b, syscall.MADV_RANDOM); err != nil {
return fmt.Errorf("madvise: %s", err)
}
// Save the original byte slice and convert to a byte array pointer.
db.dataref = b
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
db.datasz = sz
return nil
}
// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
// Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
// Unmap using the original byte slice.
err := syscall.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}
// NOTE: This function is copied from stdlib because it is not available on darwin.
func madvise(b []byte, advice int) (err error) {
_, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice))
if e1 != 0 {
err = e1
}
return
}

vendor/github.com/boltdb/bolt/bolt_unix_solaris.go generated vendored Normal file

@@ -0,0 +1,90 @@
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
"golang.org/x/sys/unix"
)
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Pid = 0
lock.Whence = 0
if exclusive {
lock.Type = syscall.F_WRLCK
} else {
lock.Type = syscall.F_RDLCK
}
err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock)
if err == nil {
return nil
} else if err != syscall.EAGAIN {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
var lock syscall.Flock_t
lock.Start = 0
lock.Len = 0
lock.Type = syscall.F_UNLCK
lock.Whence = 0
return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock)
}
// mmap memory maps a DB's data file.
func mmap(db *DB, sz int) error {
// Map the data file to memory.
b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags)
if err != nil {
return err
}
// Advise the kernel that the mmap is accessed randomly.
if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil {
return fmt.Errorf("madvise: %s", err)
}
// Save the original byte slice and convert to a byte array pointer.
db.dataref = b
db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0]))
db.datasz = sz
return nil
}
// munmap unmaps a DB's data file from memory.
func munmap(db *DB) error {
// Ignore the unmap if we have no mapped data.
if db.dataref == nil {
return nil
}
// Unmap using the original byte slice.
err := unix.Munmap(db.dataref)
db.dataref = nil
db.data = nil
db.datasz = 0
return err
}

vendor/github.com/boltdb/bolt/bolt_windows.go generated vendored Normal file

@@ -0,0 +1,144 @@
package bolt
import (
"fmt"
"os"
"syscall"
"time"
"unsafe"
)
// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1
var (
modkernel32 = syscall.NewLazyDLL("kernel32.dll")
procLockFileEx = modkernel32.NewProc("LockFileEx")
procUnlockFileEx = modkernel32.NewProc("UnlockFileEx")
)
const (
lockExt = ".lock"
// see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx
flagLockExclusive = 2
flagLockFailImmediately = 1
// see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx
errLockViolation syscall.Errno = 0x21
)
func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)))
if r == 0 {
return err
}
return nil
}
func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) {
r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0)
if r == 0 {
return err
}
return nil
}
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
}
// flock acquires an advisory lock on a file descriptor.
func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error {
// Create a separate lock file on windows because a process
// cannot share an exclusive lock on the same file. This is
// needed during Tx.WriteTo().
f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode)
if err != nil {
return err
}
db.lockfile = f
var t time.Time
for {
// If we're beyond our timeout then return an error.
// This can only occur after we've attempted a flock once.
if t.IsZero() {
t = time.Now()
} else if timeout > 0 && time.Since(t) > timeout {
return ErrTimeout
}
var flag uint32 = flagLockFailImmediately
if exclusive {
flag |= flagLockExclusive
}
err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{})
if err == nil {
return nil
} else if err != errLockViolation {
return err
}
// Wait for a bit and try again.
time.Sleep(50 * time.Millisecond)
}
}
// funlock releases an advisory lock on a file descriptor.
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path + lockExt)
return err
}
// mmap memory maps a DB's data file.
// Based on: https://github.com/edsrzf/mmap-go
func mmap(db *DB, sz int) error {
if !db.readOnly {
// Truncate the database to the size of the mmap.
if err := db.file.Truncate(int64(sz)); err != nil {
return fmt.Errorf("truncate: %s", err)
}
}
// Open a file mapping handle. CreateFileMapping takes the maximum size
// as separate high and low 32-bit halves.
sizehi := uint32(sz >> 32)
sizelo := uint32(sz) & 0xffffffff
h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizehi, sizelo, nil)
if h == 0 {
return os.NewSyscallError("CreateFileMapping", errno)
}
// Create the memory map.
addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz))
if addr == 0 {
return os.NewSyscallError("MapViewOfFile", errno)
}
// Close mapping handle.
if err := syscall.CloseHandle(syscall.Handle(h)); err != nil {
return os.NewSyscallError("CloseHandle", err)
}
// Convert to a byte array.
db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr)))
db.datasz = sz
return nil
}
// munmap unmaps a pointer from a file.
// Based on: https://github.com/edsrzf/mmap-go
func munmap(db *DB) error {
if db.data == nil {
return nil
}
addr := (uintptr)(unsafe.Pointer(&db.data[0]))
if err := syscall.UnmapViewOfFile(addr); err != nil {
return os.NewSyscallError("UnmapViewOfFile", err)
}
return nil
}

vendor/github.com/boltdb/bolt/boltsync_unix.go generated vendored Normal file

@@ -0,0 +1,8 @@
// +build !windows,!plan9,!linux,!openbsd

package bolt
// fdatasync flushes written data to a file descriptor.
func fdatasync(db *DB) error {
return db.file.Sync()
}

vendor/github.com/boltdb/bolt/bucket.go generated vendored Normal file

@@ -0,0 +1,777 @@
package bolt
import (
"bytes"
"fmt"
"unsafe"
)
const (
// MaxKeySize is the maximum length of a key, in bytes.
MaxKeySize = 32768
// MaxValueSize is the maximum length of a value, in bytes.
MaxValueSize = (1 << 31) - 2
)
const (
maxUint = ^uint(0)
minUint = 0
maxInt = int(^uint(0) >> 1)
minInt = -maxInt - 1
)
const bucketHeaderSize = int(unsafe.Sizeof(bucket{}))
const (
minFillPercent = 0.1
maxFillPercent = 1.0
)
// DefaultFillPercent is the percentage that split pages are filled.
// This value can be changed by setting Bucket.FillPercent.
const DefaultFillPercent = 0.5
// Bucket represents a collection of key/value pairs inside the database.
type Bucket struct {
*bucket
tx *Tx // the associated transaction
buckets map[string]*Bucket // subbucket cache
page *page // inline page reference
rootNode *node // materialized node for the root page.
nodes map[pgid]*node // node cache
// Sets the threshold for filling nodes when they split. By default,
// the bucket will fill to 50% but it can be useful to increase this
// amount if you know that your write workloads are mostly append-only.
//
// This is non-persisted across transactions so it must be set in every Tx.
FillPercent float64
}
// bucket represents the on-file representation of a bucket.
// This is stored as the "value" of a bucket key. If the bucket is small enough,
// then its root page can be stored inline in the "value", after the bucket
// header. In the case of inline buckets, the "root" will be 0.
type bucket struct {
root pgid // page id of the bucket's root-level page
sequence uint64 // monotonically incrementing, used by NextSequence()
}
// newBucket returns a new bucket associated with a transaction.
func newBucket(tx *Tx) Bucket {
var b = Bucket{tx: tx, FillPercent: DefaultFillPercent}
if tx.writable {
b.buckets = make(map[string]*Bucket)
b.nodes = make(map[pgid]*node)
}
return b
}
// Tx returns the tx of the bucket.
func (b *Bucket) Tx() *Tx {
return b.tx
}
// Root returns the root of the bucket.
func (b *Bucket) Root() pgid {
return b.root
}
// Writable returns whether the bucket is writable.
func (b *Bucket) Writable() bool {
return b.tx.writable
}
// Cursor creates a cursor associated with the bucket.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (b *Bucket) Cursor() *Cursor {
// Update transaction statistics.
b.tx.stats.CursorCount++
// Allocate and return a cursor.
return &Cursor{
bucket: b,
stack: make([]elemRef, 0),
}
}
// Bucket retrieves a nested bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) Bucket(name []byte) *Bucket {
if b.buckets != nil {
if child := b.buckets[string(name)]; child != nil {
return child
}
}
// Move cursor to key.
c := b.Cursor()
k, v, flags := c.seek(name)
// Return nil if the key doesn't exist or it is not a bucket.
if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 {
return nil
}
// Otherwise create a bucket and cache it.
var child = b.openBucket(v)
if b.buckets != nil {
b.buckets[string(name)] = child
}
return child
}
// openBucket re-interprets a sub-bucket value from a parent into a Bucket.
func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry.
if b.tx.writable && !unaligned {
child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else {
child.bucket = (*bucket)(unsafe.Pointer(&value[0]))
}
// Save a reference to the inline page if the bucket is inline.
if child.root == 0 {
child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
}
return &child
}
// CreateBucket creates a new bucket at the given key and returns the new bucket.
// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if b.tx.db == nil {
return nil, ErrTxClosed
} else if !b.tx.writable {
return nil, ErrTxNotWritable
} else if len(key) == 0 {
return nil, ErrBucketNameRequired
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if there is an existing key.
if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists
}
return nil, ErrIncompatibleValue
}
// Create empty, inline bucket.
var bucket = Bucket{
bucket: &bucket{},
rootNode: &node{isLeaf: true},
FillPercent: DefaultFillPercent,
}
var value = bucket.write()
// Insert into node.
key = cloneBytes(key)
c.node().put(key, key, value, 0, bucketLeafFlag)
// Since subbuckets are not allowed on inline buckets, we need to
// dereference the inline page, if it exists. This will cause the bucket
// to be treated as a regular, non-inline bucket for the rest of the tx.
b.page = nil
return b.Bucket(key), nil
}
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
child, err := b.CreateBucket(key)
if err == ErrBucketExists {
return b.Bucket(key), nil
} else if err != nil {
return nil, err
}
return child, nil
}
// DeleteBucket deletes a bucket at the given key.
// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
func (b *Bucket) DeleteBucket(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if bucket doesn't exist or is not a bucket.
if !bytes.Equal(key, k) {
return ErrBucketNotFound
} else if (flags & bucketLeafFlag) == 0 {
return ErrIncompatibleValue
}
// Recursively delete all child buckets.
child := b.Bucket(key)
err := child.ForEach(func(k, v []byte) error {
if v == nil {
if err := child.DeleteBucket(k); err != nil {
return fmt.Errorf("delete bucket: %s", err)
}
}
return nil
})
if err != nil {
return err
}
// Remove cached copy.
delete(b.buckets, string(key))
// Release all bucket pages to freelist.
child.nodes = nil
child.rootNode = nil
child.free()
// Delete the node if we have a matching key.
c.node().del(key)
return nil
}
// Get retrieves the value for a key in the bucket.
// Returns a nil value if the key does not exist or if the key is a nested bucket.
// The returned value is only valid for the life of the transaction.
func (b *Bucket) Get(key []byte) []byte {
k, v, flags := b.Cursor().seek(key)
// Return nil if this is a bucket.
if (flags & bucketLeafFlag) != 0 {
return nil
}
// If our target node isn't the same key as what's passed in then return nil.
if !bytes.Equal(key, k) {
return nil
}
return v
}
// Put sets the value for a key in the bucket.
// If the key exists then its previous value will be overwritten.
// Supplied value must remain valid for the life of the transaction.
// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
func (b *Bucket) Put(key []byte, value []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
} else if len(key) == 0 {
return ErrKeyRequired
} else if len(key) > MaxKeySize {
return ErrKeyTooLarge
} else if int64(len(value)) > MaxValueSize {
return ErrValueTooLarge
}
// Move cursor to correct position.
c := b.Cursor()
k, _, flags := c.seek(key)
// Return an error if there is an existing key with a bucket value.
if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
// Insert into node.
key = cloneBytes(key)
c.node().put(key, key, value, 0, 0)
return nil
}
// Delete removes a key from the bucket.
// If the key does not exist then nothing is done and a nil error is returned.
// Returns an error if the bucket was created from a read-only transaction.
func (b *Bucket) Delete(key []byte) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Move cursor to correct position.
c := b.Cursor()
_, _, flags := c.seek(key)
// Return an error if there is an existing bucket value.
if (flags & bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
// Delete the node if we have a matching key.
c.node().del(key)
return nil
}
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Set the sequence.
b.bucket.sequence = v
return nil
}
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {
return 0, ErrTxClosed
} else if !b.Writable() {
return 0, ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence++
return b.bucket.sequence, nil
}
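// A minimal client-side sketch of the usual NextSequence pattern (bucket
// name hypothetical): derive a monotonically increasing key inside a
// writable transaction, where NextSequence cannot fail on type grounds.
//
//	db.Update(func(tx *bolt.Tx) error {
//		b := tx.Bucket([]byte("events"))
//		seq, err := b.NextSequence()
//		if err != nil {
//			return err
//		}
//		key := make([]byte, 8)
//		binary.BigEndian.PutUint64(key, seq)
//		return b.Put(key, []byte("payload"))
//	})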
// ForEach executes a function for each key/value pair in a bucket.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller. The provided function must not modify
// the bucket; this will result in undefined behavior.
func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
if b.tx.db == nil {
return ErrTxClosed
}
c := b.Cursor()
for k, v := c.First(); k != nil; k, v = c.Next() {
if err := fn(k, v); err != nil {
return err
}
}
return nil
}
// Stats returns stats on a bucket.
func (b *Bucket) Stats() BucketStats {
var s, subStats BucketStats
pageSize := b.tx.db.pageSize
s.BucketN += 1
if b.root == 0 {
s.InlineBucketN += 1
}
b.forEachPage(func(p *page, depth int) {
if (p.flags & leafPageFlag) != 0 {
s.KeyN += int(p.count)
// used totals the used bytes for the page
used := pageHeaderSize
if p.count != 0 {
// If page has any elements, add all element headers.
used += leafPageElementSize * int(p.count-1)
// Add all element key, value sizes.
// The computation takes advantage of the fact that the position
// of the last element's key/value equals to the total of the sizes
// of all previous elements' keys and values.
// It also includes the last element's header.
lastElement := p.leafPageElement(p.count - 1)
used += int(lastElement.pos + lastElement.ksize + lastElement.vsize)
}
if b.root == 0 {
// For inlined bucket just update the inline stats
s.InlineBucketInuse += used
} else {
// For non-inlined bucket update all the leaf stats
s.LeafPageN++
s.LeafInuse += used
s.LeafOverflowN += int(p.overflow)
// Collect stats from sub-buckets.
// Do that by iterating over all element headers
// looking for the ones with the bucketLeafFlag.
for i := uint16(0); i < p.count; i++ {
e := p.leafPageElement(i)
if (e.flags & bucketLeafFlag) != 0 {
// For any bucket element, open the element value
// and recursively call Stats on the contained bucket.
subStats.Add(b.openBucket(e.value()).Stats())
}
}
}
} else if (p.flags & branchPageFlag) != 0 {
s.BranchPageN++
lastElement := p.branchPageElement(p.count - 1)
// used totals the used bytes for the page
// Add header and all element headers.
used := pageHeaderSize + (branchPageElementSize * int(p.count-1))
// Add size of all keys and values.
// Again, use the fact that last element's position equals to
// the total of key, value sizes of all previous elements.
used += int(lastElement.pos + lastElement.ksize)
s.BranchInuse += used
s.BranchOverflowN += int(p.overflow)
}
// Keep track of maximum page depth.
if depth+1 > s.Depth {
s.Depth = (depth + 1)
}
})
// Alloc stats can be computed from page counts and pageSize.
s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize
s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize
// Add the max depth of sub-buckets to get total nested depth.
s.Depth += subStats.Depth
// Add the stats for all sub-buckets
s.Add(subStats)
return s
}
// forEachPage iterates over every page in a bucket, including inline pages.
func (b *Bucket) forEachPage(fn func(*page, int)) {
// If we have an inline page then just use that.
if b.page != nil {
fn(b.page, 0)
return
}
// Otherwise traverse the page hierarchy.
b.tx.forEachPage(b.root, 0, fn)
}
// forEachPageNode iterates over every page (or node) in a bucket.
// This also includes inline pages.
func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) {
// If we have an inline page or root node then just use that.
if b.page != nil {
fn(b.page, nil, 0)
return
}
b._forEachPageNode(b.root, 0, fn)
}
func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) {
var p, n = b.pageNode(pgid)
// Execute function.
fn(p, n, depth)
// Recursively loop over children.
if p != nil {
if (p.flags & branchPageFlag) != 0 {
for i := 0; i < int(p.count); i++ {
elem := p.branchPageElement(uint16(i))
b._forEachPageNode(elem.pgid, depth+1, fn)
}
}
} else {
if !n.isLeaf {
for _, inode := range n.inodes {
b._forEachPageNode(inode.pgid, depth+1, fn)
}
}
}
}
// spill writes all the nodes for this bucket to dirty pages.
func (b *Bucket) spill() error {
// Spill all child buckets first.
for name, child := range b.buckets {
// If the child bucket is small enough and it has no child buckets then
// write it inline into the parent bucket's page. Otherwise spill it
// like a normal bucket and make the parent value a pointer to the page.
var value []byte
if child.inlineable() {
child.free()
value = child.write()
} else {
if err := child.spill(); err != nil {
return err
}
// Update the child bucket header in this bucket.
value = make([]byte, unsafe.Sizeof(bucket{}))
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *child.bucket
}
// Skip writing the bucket if there are no materialized nodes.
if child.rootNode == nil {
continue
}
// Update parent node.
var c = b.Cursor()
k, _, flags := c.seek([]byte(name))
if !bytes.Equal([]byte(name), k) {
panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k))
}
if flags&bucketLeafFlag == 0 {
panic(fmt.Sprintf("unexpected bucket header flag: %x", flags))
}
c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag)
}
// Ignore if there's not a materialized root node.
if b.rootNode == nil {
return nil
}
// Spill nodes.
if err := b.rootNode.spill(); err != nil {
return err
}
b.rootNode = b.rootNode.root()
// Update the root node for this bucket.
if b.rootNode.pgid >= b.tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid))
}
b.root = b.rootNode.pgid
return nil
}
// inlineable returns true if a bucket is small enough to be written inline
// and if it contains no subbuckets. Otherwise returns false.
func (b *Bucket) inlineable() bool {
var n = b.rootNode
// Bucket must only contain a single leaf node.
if n == nil || !n.isLeaf {
return false
}
// Bucket is not inlineable if it contains subbuckets or if it goes beyond
// our threshold for inline bucket size.
var size = pageHeaderSize
for _, inode := range n.inodes {
size += leafPageElementSize + len(inode.key) + len(inode.value)
if inode.flags&bucketLeafFlag != 0 {
return false
} else if size > b.maxInlineBucketSize() {
return false
}
}
return true
}
// maxInlineBucketSize returns the maximum total size of a bucket to make it a candidate for inlining.
func (b *Bucket) maxInlineBucketSize() int {
return b.tx.db.pageSize / 4
}
// write allocates and writes a bucket to a byte slice.
func (b *Bucket) write() []byte {
// Allocate the appropriate size.
var n = b.rootNode
var value = make([]byte, bucketHeaderSize+n.size())
// Write a bucket header.
var bucket = (*bucket)(unsafe.Pointer(&value[0]))
*bucket = *b.bucket
// Convert byte slice to a fake page and write the root node.
var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize]))
n.write(p)
return value
}
// rebalance attempts to balance all nodes.
func (b *Bucket) rebalance() {
for _, n := range b.nodes {
n.rebalance()
}
for _, child := range b.buckets {
child.rebalance()
}
}
// node creates a node from a page and associates it with a given parent.
func (b *Bucket) node(pgid pgid, parent *node) *node {
_assert(b.nodes != nil, "nodes map expected")
// Retrieve node if it's already been created.
if n := b.nodes[pgid]; n != nil {
return n
}
// Otherwise create a node and cache it.
n := &node{bucket: b, parent: parent}
if parent == nil {
b.rootNode = n
} else {
parent.children = append(parent.children, n)
}
// Use the inline page if this is an inline bucket.
var p = b.page
if p == nil {
p = b.tx.page(pgid)
}
// Read the page into the node and cache it.
n.read(p)
b.nodes[pgid] = n
// Update statistics.
b.tx.stats.NodeCount++
return n
}
// free recursively frees all pages in the bucket.
func (b *Bucket) free() {
if b.root == 0 {
return
}
var tx = b.tx
b.forEachPageNode(func(p *page, n *node, _ int) {
if p != nil {
tx.db.freelist.free(tx.meta.txid, p)
} else {
n.free()
}
})
b.root = 0
}
// dereference removes all references to the old mmap.
func (b *Bucket) dereference() {
if b.rootNode != nil {
b.rootNode.root().dereference()
}
for _, child := range b.buckets {
child.dereference()
}
}
// pageNode returns the in-memory node, if it exists.
// Otherwise returns the underlying page.
func (b *Bucket) pageNode(id pgid) (*page, *node) {
// Inline buckets have a fake page embedded in their value so treat them
// differently. We'll return the rootNode (if available) or the fake page.
if b.root == 0 {
if id != 0 {
panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
}
if b.rootNode != nil {
return nil, b.rootNode
}
return b.page, nil
}
// Check the node cache for non-inline buckets.
if b.nodes != nil {
if n := b.nodes[id]; n != nil {
return nil, n
}
}
// Finally lookup the page from the transaction if no node is materialized.
return b.tx.page(id), nil
}
// BucketStats records statistics about resources used by a bucket.
type BucketStats struct {
// Page count statistics.
BranchPageN int // number of logical branch pages
BranchOverflowN int // number of physical branch overflow pages
LeafPageN int // number of logical leaf pages
LeafOverflowN int // number of physical leaf overflow pages
// Tree statistics.
KeyN int // number of keys/value pairs
Depth int // number of levels in B+tree
// Page size utilization.
BranchAlloc int // bytes allocated for physical branch pages
BranchInuse int // bytes actually used for branch data
LeafAlloc int // bytes allocated for physical leaf pages
LeafInuse int // bytes actually used for leaf data
// Bucket statistics
BucketN int // total number of buckets including the top bucket
InlineBucketN int // total number of inlined buckets
InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
}
func (s *BucketStats) Add(other BucketStats) {
s.BranchPageN += other.BranchPageN
s.BranchOverflowN += other.BranchOverflowN
s.LeafPageN += other.LeafPageN
s.LeafOverflowN += other.LeafOverflowN
s.KeyN += other.KeyN
if s.Depth < other.Depth {
s.Depth = other.Depth
}
s.BranchAlloc += other.BranchAlloc
s.BranchInuse += other.BranchInuse
s.LeafAlloc += other.LeafAlloc
s.LeafInuse += other.LeafInuse
s.BucketN += other.BucketN
s.InlineBucketN += other.InlineBucketN
s.InlineBucketInuse += other.InlineBucketInuse
}
// cloneBytes returns a copy of a given slice.
func cloneBytes(v []byte) []byte {
var clone = make([]byte, len(v))
copy(clone, v)
return clone
}
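// A minimal client-side sketch of the Bucket API above (bucket and key
// names hypothetical):
//
//	err := db.Update(func(tx *bolt.Tx) error {
//		b, err := tx.CreateBucketIfNotExists([]byte("people"))
//		if err != nil {
//			return err
//		}
//		if err := b.Put([]byte("alice"), []byte("42")); err != nil {
//			return err
//		}
//		fmt.Printf("%s\n", b.Get([]byte("alice")))
//		return nil
//	})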

vendor/github.com/boltdb/bolt/cursor.go generated vendored Normal file

@@ -0,0 +1,400 @@
package bolt
import (
"bytes"
"fmt"
"sort"
)
// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
// Cursors see nested buckets with value == nil.
// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
//
// Keys and values returned from the cursor are only valid for the life of the transaction.
//
// Changing data while traversing with a cursor may cause it to be invalidated
// and return unexpected keys and/or values. You must reposition your cursor
// after mutating data.
type Cursor struct {
bucket *Bucket
stack []elemRef
}
// Bucket returns the bucket that this cursor was created from.
func (c *Cursor) Bucket() *Bucket {
return c.bucket
}
// First moves the cursor to the first item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) First() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.root)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
c.first()
// If we land on an empty page then move to the next value.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
c.next()
}
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Last moves the cursor to the last item in the bucket and returns its key and value.
// If the bucket is empty then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Last() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
c.stack = c.stack[:0]
p, n := c.bucket.pageNode(c.bucket.root)
ref := elemRef{page: p, node: n}
ref.index = ref.count() - 1
c.stack = append(c.stack, ref)
c.last()
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Next moves the cursor to the next item in the bucket and returns its key and value.
// If the cursor is at the end of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Next() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
k, v, flags := c.next()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Prev moves the cursor to the previous item in the bucket and returns its key and value.
// If the cursor is at the beginning of the bucket then a nil key and value are returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Prev() (key []byte, value []byte) {
_assert(c.bucket.tx.db != nil, "tx closed")
// Attempt to move back one element until we're successful.
// Move up the stack as we hit the beginning of each page in our stack.
for i := len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index > 0 {
elem.index--
break
}
c.stack = c.stack[:i]
}
// If we've hit the end then return nil.
if len(c.stack) == 0 {
return nil, nil
}
// Move down the stack to find the last element of the last leaf under this branch.
c.last()
k, v, flags := c.keyValue()
if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used. If no keys
// follow, a nil key is returned.
// The returned key and value are only valid for the life of the transaction.
func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) {
k, v, flags := c.seek(seek)
// If we ended up after the last element of a page then move to the next one.
if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() {
k, v, flags = c.next()
}
if k == nil {
return nil, nil
} else if (flags & uint32(bucketLeafFlag)) != 0 {
return k, nil
}
return k, v
}
// Delete removes the current key/value under the cursor from the bucket.
// Delete fails if current key/value is a bucket or if the transaction is not writable.
func (c *Cursor) Delete() error {
if c.bucket.tx.db == nil {
return ErrTxClosed
} else if !c.bucket.Writable() {
return ErrTxNotWritable
}
key, _, flags := c.keyValue()
// Return an error if current value is a bucket.
if (flags & bucketLeafFlag) != 0 {
return ErrIncompatibleValue
}
c.node().del(key)
return nil
}
// seek moves the cursor to a given key and returns it.
// If the key does not exist then the next key is used.
func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) {
_assert(c.bucket.tx.db != nil, "tx closed")
// Start from root page/node and traverse to correct page.
c.stack = c.stack[:0]
c.search(seek, c.bucket.root)
ref := &c.stack[len(c.stack)-1]
// If the cursor is pointing to the end of page/node then return nil.
if ref.index >= ref.count() {
return nil, nil, 0
}
// If this is a bucket then return a nil value.
return c.keyValue()
}
// first moves the cursor to the first leaf element under the last page in the stack.
func (c *Cursor) first() {
for {
// Exit when we hit a leaf page.
var ref = &c.stack[len(c.stack)-1]
if ref.isLeaf() {
break
}
// Keep adding pages pointing to the first element to the stack.
var pgid pgid
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
}
p, n := c.bucket.pageNode(pgid)
c.stack = append(c.stack, elemRef{page: p, node: n, index: 0})
}
}
// last moves the cursor to the last leaf element under the last page in the stack.
func (c *Cursor) last() {
for {
// Exit when we hit a leaf page.
ref := &c.stack[len(c.stack)-1]
if ref.isLeaf() {
break
}
// Keep adding pages pointing to the last element in the stack.
var pgid pgid
if ref.node != nil {
pgid = ref.node.inodes[ref.index].pgid
} else {
pgid = ref.page.branchPageElement(uint16(ref.index)).pgid
}
p, n := c.bucket.pageNode(pgid)
var nextRef = elemRef{page: p, node: n}
nextRef.index = nextRef.count() - 1
c.stack = append(c.stack, nextRef)
}
}
// next moves to the next leaf element and returns the key and value.
// If the cursor is at the last leaf element then it stays there and returns nil.
func (c *Cursor) next() (key []byte, value []byte, flags uint32) {
for {
// Attempt to move over one element until we're successful.
// Move up the stack as we hit the end of each page in our stack.
var i int
for i = len(c.stack) - 1; i >= 0; i-- {
elem := &c.stack[i]
if elem.index < elem.count()-1 {
elem.index++
break
}
}
// If we've hit the root page then stop and return. This will leave the
// cursor on the last element of the last page.
if i == -1 {
return nil, nil, 0
}
// Otherwise start from where we left off in the stack and find the
// first element of the first leaf page.
c.stack = c.stack[:i+1]
c.first()
// If this is an empty page then restart and move back up the stack.
// https://github.com/boltdb/bolt/issues/450
if c.stack[len(c.stack)-1].count() == 0 {
continue
}
return c.keyValue()
}
}
// search recursively performs a binary search against a given page/node until it finds a given key.
func (c *Cursor) search(key []byte, pgid pgid) {
p, n := c.bucket.pageNode(pgid)
if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 {
panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags))
}
e := elemRef{page: p, node: n}
c.stack = append(c.stack, e)
// If we're on a leaf page/node then find the specific node.
if e.isLeaf() {
c.nsearch(key)
return
}
if n != nil {
c.searchNode(key, n)
return
}
c.searchPage(key, p)
}
func (c *Cursor) searchNode(key []byte, n *node) {
var exact bool
index := sort.Search(len(n.inodes), func(i int) bool {
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
ret := bytes.Compare(n.inodes[i].key, key)
if ret == 0 {
exact = true
}
return ret != -1
})
if !exact && index > 0 {
index--
}
c.stack[len(c.stack)-1].index = index
// Recursively search to the next page.
c.search(key, n.inodes[index].pgid)
}
func (c *Cursor) searchPage(key []byte, p *page) {
// Binary search for the correct range.
inodes := p.branchPageElements()
var exact bool
index := sort.Search(int(p.count), func(i int) bool {
// TODO(benbjohnson): Optimize this range search. It's a bit hacky right now.
// sort.Search() finds the lowest index where f() != -1 but we need the highest index.
ret := bytes.Compare(inodes[i].key(), key)
if ret == 0 {
exact = true
}
return ret != -1
})
if !exact && index > 0 {
index--
}
c.stack[len(c.stack)-1].index = index
// Recursively search to the next page.
c.search(key, inodes[index].pgid)
}
// nsearch searches the leaf node on the top of the stack for a key.
func (c *Cursor) nsearch(key []byte) {
e := &c.stack[len(c.stack)-1]
p, n := e.page, e.node
// If we have a node then search its inodes.
if n != nil {
index := sort.Search(len(n.inodes), func(i int) bool {
return bytes.Compare(n.inodes[i].key, key) != -1
})
e.index = index
return
}
// If we have a page then search its leaf elements.
inodes := p.leafPageElements()
index := sort.Search(int(p.count), func(i int) bool {
return bytes.Compare(inodes[i].key(), key) != -1
})
e.index = index
}
// keyValue returns the key and value of the current leaf element.
func (c *Cursor) keyValue() ([]byte, []byte, uint32) {
ref := &c.stack[len(c.stack)-1]
if ref.count() == 0 || ref.index >= ref.count() {
return nil, nil, 0
}
// Retrieve value from node.
if ref.node != nil {
inode := &ref.node.inodes[ref.index]
return inode.key, inode.value, inode.flags
}
// Or retrieve value from page.
elem := ref.page.leafPageElement(uint16(ref.index))
return elem.key(), elem.value(), elem.flags
}
// node returns the node that the cursor is currently positioned on.
func (c *Cursor) node() *node {
_assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack")
// If the top of the stack is a leaf node then just return it.
if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() {
return ref.node
}
// Start from root and traverse down the hierarchy.
var n = c.stack[0].node
if n == nil {
n = c.bucket.node(c.stack[0].page.id, nil)
}
for _, ref := range c.stack[:len(c.stack)-1] {
_assert(!n.isLeaf, "expected branch node")
n = n.childAt(int(ref.index))
}
_assert(n.isLeaf, "expected leaf node")
return n
}
// elemRef represents a reference to an element on a given page/node.
type elemRef struct {
page *page
node *node
index int
}
// isLeaf returns whether the ref is pointing at a leaf page/node.
func (r *elemRef) isLeaf() bool {
if r.node != nil {
return r.node.isLeaf
}
return (r.page.flags & leafPageFlag) != 0
}
// count returns the number of inodes or page elements.
func (r *elemRef) count() int {
if r.node != nil {
return len(r.node.inodes)
}
return int(r.page.count)
}
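From a caller's point of view, the cursor above is usually driven in a First/Next loop. A minimal sketch (illustrative, not part of the vendored file; assumes an open db with a "widgets" bucket):

err := db.View(func(tx *bolt.Tx) error {
	c := tx.Bucket([]byte("widgets")).Cursor()
	// First/Next visit keys in sorted order; Seek jumps to the first
	// key at or after the given prefix.
	for k, v := c.First(); k != nil; k, v = c.Next() {
		fmt.Printf("%s = %s\n", k, v)
	}
	return nil
})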

vendor/github.com/boltdb/bolt/db.go generated vendored Normal file

File diff suppressed because it is too large

vendor/github.com/boltdb/bolt/doc.go generated vendored Normal file

@ -0,0 +1,44 @@
/*
Package bolt implements a low-level key/value store in pure Go. It supports
fully serializable transactions, ACID semantics, and lock-free MVCC with
multiple readers and a single writer. Bolt can be used for projects that
want a simple data store without the need to add large dependencies such as
Postgres or MySQL.
Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is
optimized for fast read access and does not require recovery in the event of a
system crash. Transactions which have not finished committing will simply be
rolled back in the event of a crash.
The design of Bolt is based on Howard Chu's LMDB database project.
Bolt currently works on Windows, Mac OS X, and Linux.
Basics
There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is
a collection of buckets and is represented by a single file on disk. A bucket is
a collection of unique keys that are associated with values.
Transactions provide either read-only or read-write access to the database.
Read-only transactions can retrieve key/value pairs and can use Cursors to
iterate over the dataset sequentially. Read-write transactions can create and
delete buckets and can insert and remove keys. Only one read-write transaction
is allowed at a time.
Caveats
The database uses a read-only, memory-mapped data file to ensure that
applications cannot corrupt the database; however, this means that keys and
values returned from Bolt cannot be changed. Writing to a read-only byte slice
will cause Go to panic.
Keys and values retrieved from the database are only valid for the life of
the transaction. When used outside the transaction, these byte slices can
point to different data or can point to invalid memory which will cause a panic.
*/
package bolt
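As a quick orientation to the API described above, here is a minimal end-to-end sketch (illustrative, not part of the vendored file; the file, bucket, and key names are made up):

package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open creates or opens the data file and acquires the file lock.
	db, err := bolt.Open("bolt.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// One read-write transaction at a time: create a bucket, store a key.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		return b.Put([]byte("foo"), []byte("bar"))
	})
	if err != nil {
		log.Fatal(err)
	}

	// Read-only transaction: returned slices are only valid inside it.
	err = db.View(func(tx *bolt.Tx) error {
		fmt.Printf("foo = %s\n", tx.Bucket([]byte("widgets")).Get([]byte("foo")))
		return nil
	})
	if err != nil {
		log.Fatal(err)
	}
}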

vendor/github.com/boltdb/bolt/errors.go generated vendored Normal file

@ -0,0 +1,71 @@
package bolt
import "errors"
// These errors can be returned when opening or calling methods on a DB.
var (
// ErrDatabaseNotOpen is returned when a DB instance is accessed before it
// is opened or after it is closed.
ErrDatabaseNotOpen = errors.New("database not open")
// ErrDatabaseOpen is returned when opening a database that is
// already open.
ErrDatabaseOpen = errors.New("database already open")
// ErrInvalid is returned when both meta pages on a database are invalid.
// This typically occurs when a file is not a bolt database.
ErrInvalid = errors.New("invalid database")
// ErrVersionMismatch is returned when the data file was created with a
// different version of Bolt.
ErrVersionMismatch = errors.New("version mismatch")
// ErrChecksum is returned when either meta page checksum does not match.
ErrChecksum = errors.New("checksum error")
// ErrTimeout is returned when a database cannot obtain an exclusive lock
// on the data file after the timeout passed to Open().
ErrTimeout = errors.New("timeout")
)
// These errors can occur when beginning or committing a Tx.
var (
// ErrTxNotWritable is returned when performing a write operation on a
// read-only transaction.
ErrTxNotWritable = errors.New("tx not writable")
// ErrTxClosed is returned when committing or rolling back a transaction
// that has already been committed or rolled back.
ErrTxClosed = errors.New("tx closed")
// ErrDatabaseReadOnly is returned when a mutating transaction is started on a
// read-only database.
ErrDatabaseReadOnly = errors.New("database is in read-only mode")
)
// These errors can occur when putting or deleting a value or a bucket.
var (
// ErrBucketNotFound is returned when trying to access a bucket that has
// not been created yet.
ErrBucketNotFound = errors.New("bucket not found")
// ErrBucketExists is returned when creating a bucket that already exists.
ErrBucketExists = errors.New("bucket already exists")
// ErrBucketNameRequired is returned when creating a bucket with a blank name.
ErrBucketNameRequired = errors.New("bucket name required")
// ErrKeyRequired is returned when inserting a zero-length key.
ErrKeyRequired = errors.New("key required")
// ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize.
ErrKeyTooLarge = errors.New("key too large")
// ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize.
ErrValueTooLarge = errors.New("value too large")
// ErrIncompatibleValue is returned when trying to create or delete a bucket
// on an existing non-bucket key or when trying to create or delete a
// non-bucket key on an existing bucket key.
ErrIncompatibleValue = errors.New("incompatible value")
)
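Callers are expected to compare against these sentinel values directly; a hedged sketch of the usual pattern (bucket name illustrative):

_, err := tx.CreateBucket([]byte("widgets"))
if err == bolt.ErrBucketExists {
	// Expected on a second run; fall back to tx.Bucket to fetch it.
} else if err != nil {
	return err
}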

vendor/github.com/boltdb/bolt/freelist.go generated vendored Normal file

@ -0,0 +1,252 @@
package bolt
import (
"fmt"
"sort"
"unsafe"
)
// freelist represents a list of all pages that are available for allocation.
// It also tracks pages that have been freed but are still in use by open transactions.
type freelist struct {
ids []pgid // all free and available free page ids.
pending map[txid][]pgid // mapping of soon-to-be free page ids by tx.
cache map[pgid]bool // fast lookup of all free and pending page ids.
}
// newFreelist returns an empty, initialized freelist.
func newFreelist() *freelist {
return &freelist{
pending: make(map[txid][]pgid),
cache: make(map[pgid]bool),
}
}
// size returns the size of the page after serialization.
func (f *freelist) size() int {
n := f.count()
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
}
// count returns count of pages on the freelist
func (f *freelist) count() int {
return f.free_count() + f.pending_count()
}
// free_count returns count of free pages
func (f *freelist) free_count() int {
return len(f.ids)
}
// pending_count returns count of pending pages
func (f *freelist) pending_count() int {
var count int
for _, list := range f.pending {
count += len(list)
}
return count
}
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
for _, list := range f.pending {
m = append(m, list...)
}
sort.Sort(m)
mergepgids(dst, f.ids, m)
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
// If a contiguous block cannot be found then 0 is returned.
func (f *freelist) allocate(n int) pgid {
if len(f.ids) == 0 {
return 0
}
var initial, previd pgid
for i, id := range f.ids {
if id <= 1 {
panic(fmt.Sprintf("invalid page allocation: %d", id))
}
// Reset initial page if this is not contiguous.
if previd == 0 || id-previd != 1 {
initial = id
}
// If we found a contiguous block then remove it and return it.
if (id-initial)+1 == pgid(n) {
// If we're allocating off the beginning then take the fast path
// and just adjust the existing slice. This will use extra memory
// temporarily but the append() in free() will realloc the slice
// as is necessary.
if (i + 1) == n {
f.ids = f.ids[i+1:]
} else {
copy(f.ids[i-n+1:], f.ids[i+1:])
f.ids = f.ids[:len(f.ids)-n]
}
// Remove from the free cache.
for i := pgid(0); i < pgid(n); i++ {
delete(f.cache, initial+i)
}
return initial
}
previd = id
}
return 0
}
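// Worked example (illustrative, not from the vendored source): with
// f.ids = [3 4 6 7 8] and n = 3, the scan sets initial = 3, resets
// initial = 6 at the gap (6-4 != 1), and at id = 8 sees (8-6)+1 == 3,
// so it removes pages 6, 7, 8 from f.ids and the cache and returns 6.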
// free releases a page and its overflow for a given transaction id.
// If the page is already free then a panic will occur.
func (f *freelist) free(txid txid, p *page) {
if p.id <= 1 {
panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id))
}
// Free page and all its overflow pages.
var ids = f.pending[txid]
for id := p.id; id <= p.id+pgid(p.overflow); id++ {
// Verify that page is not already free.
if f.cache[id] {
panic(fmt.Sprintf("page %d already freed", id))
}
// Add to the freelist and cache.
ids = append(ids, id)
f.cache[id] = true
}
f.pending[txid] = ids
}
// release moves all page ids for a transaction id (or older) to the freelist.
func (f *freelist) release(txid txid) {
m := make(pgids, 0)
for tid, ids := range f.pending {
if tid <= txid {
// Move transaction's pending pages to the available freelist.
// Don't remove from the cache since the page is still free.
m = append(m, ids...)
delete(f.pending, tid)
}
}
sort.Sort(m)
f.ids = pgids(f.ids).merge(m)
}
// rollback removes the pages from a given pending tx.
func (f *freelist) rollback(txid txid) {
// Remove page ids from cache.
for _, id := range f.pending[txid] {
delete(f.cache, id)
}
// Remove pages from pending list.
delete(f.pending, txid)
}
// freed returns whether a given page is in the free list.
func (f *freelist) freed(pgid pgid) bool {
return f.cache[pgid]
}
// read initializes the freelist from a freelist page.
func (f *freelist) read(p *page) {
// If the page.count is at the max uint16 value (64k) then it's considered
// an overflow and the size of the freelist is stored as the first element.
idx, count := 0, int(p.count)
if count == 0xFFFF {
idx = 1
count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0])
}
// Copy the list of page ids from the freelist.
if count == 0 {
f.ids = nil
} else {
ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count]
f.ids = make([]pgid, len(ids))
copy(f.ids, ids)
// Make sure they're sorted.
sort.Sort(pgids(f.ids))
}
// Rebuild the page cache.
f.reindex()
}
// write writes the page ids onto a freelist page. All free and pending ids are
// saved to disk since in the event of a program crash, all pending ids will
// become free.
func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction.
// Update the header flag.
p.flags |= freelistPageFlag
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
} else if lenids < 0xFFFF {
p.count = uint16(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
} else {
p.count = 0xFFFF
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
}
return nil
}
// reload reads the freelist from a page and filters out pending items.
func (f *freelist) reload(p *page) {
f.read(p)
// Build a cache of only pending pages.
pcache := make(map[pgid]bool)
for _, pendingIDs := range f.pending {
for _, pendingID := range pendingIDs {
pcache[pendingID] = true
}
}
// Check each page in the freelist and build a new available freelist
// with any pages not in the pending lists.
var a []pgid
for _, id := range f.ids {
if !pcache[id] {
a = append(a, id)
}
}
f.ids = a
// Once the available list is rebuilt then rebuild the free cache so that
// it includes the available and pending free pages.
f.reindex()
}
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
f.cache = make(map[pgid]bool, len(f.ids))
for _, id := range f.ids {
f.cache[id] = true
}
for _, pendingIDs := range f.pending {
for _, pendingID := range pendingIDs {
f.cache[pendingID] = true
}
}
}

vendor/github.com/boltdb/bolt/node.go generated vendored Normal file

@ -0,0 +1,604 @@
package bolt
import (
"bytes"
"fmt"
"sort"
"unsafe"
)
// node represents an in-memory, deserialized page.
type node struct {
bucket *Bucket
isLeaf bool
unbalanced bool
spilled bool
key []byte
pgid pgid
parent *node
children nodes
inodes inodes
}
// root returns the top-level node this node is attached to.
func (n *node) root() *node {
if n.parent == nil {
return n
}
return n.parent.root()
}
// minKeys returns the minimum number of inodes this node should have.
func (n *node) minKeys() int {
if n.isLeaf {
return 1
}
return 2
}
// size returns the size of the node after serialization.
func (n *node) size() int {
sz, elsz := pageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
item := &n.inodes[i]
sz += elsz + len(item.key) + len(item.value)
}
return sz
}
// sizeLessThan returns true if the node is less than a given size.
// This is an optimization to avoid calculating a large node when we only need
// to know if it fits inside a certain page size.
func (n *node) sizeLessThan(v int) bool {
sz, elsz := pageHeaderSize, n.pageElementSize()
for i := 0; i < len(n.inodes); i++ {
item := &n.inodes[i]
sz += elsz + len(item.key) + len(item.value)
if sz >= v {
return false
}
}
return true
}
// pageElementSize returns the size of each page element based on the type of node.
func (n *node) pageElementSize() int {
if n.isLeaf {
return leafPageElementSize
}
return branchPageElementSize
}
// childAt returns the child node at a given index.
func (n *node) childAt(index int) *node {
if n.isLeaf {
panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index))
}
return n.bucket.node(n.inodes[index].pgid, n)
}
// childIndex returns the index of a given child node.
func (n *node) childIndex(child *node) int {
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 })
return index
}
// numChildren returns the number of children.
func (n *node) numChildren() int {
return len(n.inodes)
}
// nextSibling returns the next node with the same parent.
func (n *node) nextSibling() *node {
if n.parent == nil {
return nil
}
index := n.parent.childIndex(n)
if index >= n.parent.numChildren()-1 {
return nil
}
return n.parent.childAt(index + 1)
}
// prevSibling returns the previous node with the same parent.
func (n *node) prevSibling() *node {
if n.parent == nil {
return nil
}
index := n.parent.childIndex(n)
if index == 0 {
return nil
}
return n.parent.childAt(index - 1)
}
// put inserts a key/value.
func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) {
if pgid >= n.bucket.tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid))
} else if len(oldKey) <= 0 {
panic("put: zero-length old key")
} else if len(newKey) <= 0 {
panic("put: zero-length new key")
}
// Find insertion index.
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 })
// Add capacity and shift nodes if we don't have an exact match and need to insert.
exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey))
if !exact {
n.inodes = append(n.inodes, inode{})
copy(n.inodes[index+1:], n.inodes[index:])
}
inode := &n.inodes[index]
inode.flags = flags
inode.key = newKey
inode.value = value
inode.pgid = pgid
_assert(len(inode.key) > 0, "put: zero-length inode key")
}
// del removes a key from the node.
func (n *node) del(key []byte) {
// Find index of key.
index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 })
// Exit if the key isn't found.
if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) {
return
}
// Delete inode from the node.
n.inodes = append(n.inodes[:index], n.inodes[index+1:]...)
// Mark the node as needing rebalancing.
n.unbalanced = true
}
// read initializes the node from a page.
func (n *node) read(p *page) {
n.pgid = p.id
n.isLeaf = ((p.flags & leafPageFlag) != 0)
n.inodes = make(inodes, int(p.count))
for i := 0; i < int(p.count); i++ {
inode := &n.inodes[i]
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
inode.flags = elem.flags
inode.key = elem.key()
inode.value = elem.value()
} else {
elem := p.branchPageElement(uint16(i))
inode.pgid = elem.pgid
inode.key = elem.key()
}
_assert(len(inode.key) > 0, "read: zero-length inode key")
}
// Save first key so we can find the node in the parent when we spill.
if len(n.inodes) > 0 {
n.key = n.inodes[0].key
_assert(len(n.key) > 0, "read: zero-length node key")
} else {
n.key = nil
}
}
// write writes the items onto one or more pages.
func (n *node) write(p *page) {
// Initialize page.
if n.isLeaf {
p.flags |= leafPageFlag
} else {
p.flags |= branchPageFlag
}
if len(n.inodes) >= 0xFFFF {
panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id))
}
p.count = uint16(len(n.inodes))
// Stop here if there are no items to write.
if p.count == 0 {
return
}
// Loop over each item and write it to the page.
b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):]
for i, item := range n.inodes {
_assert(len(item.key) > 0, "write: zero-length inode key")
// Write the page element.
if n.isLeaf {
elem := p.leafPageElement(uint16(i))
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
elem.flags = item.flags
elem.ksize = uint32(len(item.key))
elem.vsize = uint32(len(item.value))
} else {
elem := p.branchPageElement(uint16(i))
elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem)))
elem.ksize = uint32(len(item.key))
elem.pgid = item.pgid
_assert(elem.pgid != p.id, "write: circular dependency occurred")
}
// If the length of key+value is larger than the max allocation size
// then we need to reallocate the byte array pointer.
//
// See: https://github.com/boltdb/bolt/pull/335
klen, vlen := len(item.key), len(item.value)
if len(b) < klen+vlen {
b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:]
}
// Write data for the element to the end of the page.
copy(b[0:], item.key)
b = b[klen:]
copy(b[0:], item.value)
b = b[vlen:]
}
// DEBUG ONLY: n.dump()
}
// split breaks up a node into multiple smaller nodes, if appropriate.
// This should only be called from the spill() function.
func (n *node) split(pageSize int) []*node {
var nodes []*node
node := n
for {
// Split node into two.
a, b := node.splitTwo(pageSize)
nodes = append(nodes, a)
// If we can't split then exit the loop.
if b == nil {
break
}
// Set node to b so it gets split on the next iteration.
node = b
}
return nodes
}
// splitTwo breaks up a node into two smaller nodes, if appropriate.
// This should only be called from the split() function.
func (n *node) splitTwo(pageSize int) (*node, *node) {
// Ignore the split if the page doesn't have at least enough nodes for
// two pages or if the nodes can fit in a single page.
if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) {
return n, nil
}
// Determine the threshold before starting a new node.
var fillPercent = n.bucket.FillPercent
if fillPercent < minFillPercent {
fillPercent = minFillPercent
} else if fillPercent > maxFillPercent {
fillPercent = maxFillPercent
}
threshold := int(float64(pageSize) * fillPercent)
// Determine split position and sizes of the two pages.
splitIndex, _ := n.splitIndex(threshold)
// Split node into two separate nodes.
// If there's no parent then we'll need to create one.
if n.parent == nil {
n.parent = &node{bucket: n.bucket, children: []*node{n}}
}
// Create a new node and add it to the parent.
next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent}
n.parent.children = append(n.parent.children, next)
// Split inodes across two nodes.
next.inodes = n.inodes[splitIndex:]
n.inodes = n.inodes[:splitIndex]
// Update the statistics.
n.bucket.tx.stats.Split++
return n, next
}
// splitIndex finds the position where a page will fill a given threshold.
// It returns the index as well as the size of the first page.
// It is only called from split().
func (n *node) splitIndex(threshold int) (index, sz int) {
sz = pageHeaderSize
// Loop until we only have the minimum number of keys required for the second page.
for i := 0; i < len(n.inodes)-minKeysPerPage; i++ {
index = i
inode := n.inodes[i]
elsize := n.pageElementSize() + len(inode.key) + len(inode.value)
// If we have at least the minimum number of keys and adding another
// node would put us over the threshold then exit and return.
if i >= minKeysPerPage && sz+elsize > threshold {
break
}
// Add the element size to the total size.
sz += elsize
}
return
}
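// Worked example (illustrative): with a 4096-byte page and the default
// FillPercent of 0.5, splitTwo computes threshold = int(4096 * 0.5) = 2048;
// splitIndex then accumulates pageHeaderSize plus per-element sizes and
// stops at the first i >= minKeysPerPage whose element would push sz past
// 2048, so each resulting node keeps at least minKeysPerPage (2) keys.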
// spill writes the nodes to dirty pages and splits nodes as it goes.
// Returns an error if dirty pages cannot be allocated.
func (n *node) spill() error {
var tx = n.bucket.tx
if n.spilled {
return nil
}
// Spill child nodes first. Child nodes can materialize sibling nodes in
// the case of split-merge so we cannot use a range loop. We have to check
// the children size on every loop iteration.
sort.Sort(n.children)
for i := 0; i < len(n.children); i++ {
if err := n.children[i].spill(); err != nil {
return err
}
}
// We no longer need the child list because it's only used for spill tracking.
n.children = nil
// Split nodes into appropriate sizes. The first node will always be n.
var nodes = n.split(tx.db.pageSize)
for _, node := range nodes {
// Add node's page to the freelist if it's not new.
if node.pgid > 0 {
tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid))
node.pgid = 0
}
// Allocate contiguous space for the node.
p, err := tx.allocate((node.size() / tx.db.pageSize) + 1)
if err != nil {
return err
}
// Write the node.
if p.id >= tx.meta.pgid {
panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid))
}
node.pgid = p.id
node.write(p)
node.spilled = true
// Insert into parent inodes.
if node.parent != nil {
var key = node.key
if key == nil {
key = node.inodes[0].key
}
node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0)
node.key = node.inodes[0].key
_assert(len(node.key) > 0, "spill: zero-length node key")
}
// Update the statistics.
tx.stats.Spill++
}
// If the root node split and created a new root then we need to spill that
// as well. We'll clear out the children to make sure it doesn't try to respill.
if n.parent != nil && n.parent.pgid == 0 {
n.children = nil
return n.parent.spill()
}
return nil
}
// rebalance attempts to combine the node with sibling nodes if the node fill
// size is below a threshold or if there are not enough keys.
func (n *node) rebalance() {
if !n.unbalanced {
return
}
n.unbalanced = false
// Update statistics.
n.bucket.tx.stats.Rebalance++
// Ignore if node is above threshold (25%) and has enough keys.
var threshold = n.bucket.tx.db.pageSize / 4
if n.size() > threshold && len(n.inodes) > n.minKeys() {
return
}
// Root node has special handling.
if n.parent == nil {
// If root node is a branch and only has one node then collapse it.
if !n.isLeaf && len(n.inodes) == 1 {
// Move root's child up.
child := n.bucket.node(n.inodes[0].pgid, n)
n.isLeaf = child.isLeaf
n.inodes = child.inodes[:]
n.children = child.children
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent = n
}
}
// Remove old child.
child.parent = nil
delete(n.bucket.nodes, child.pgid)
child.free()
}
return
}
// If node has no keys then just remove it.
if n.numChildren() == 0 {
n.parent.del(n.key)
n.parent.removeChild(n)
delete(n.bucket.nodes, n.pgid)
n.free()
n.parent.rebalance()
return
}
_assert(n.parent.numChildren() > 1, "parent must have at least 2 children")
// Destination node is right sibling if idx == 0, otherwise left sibling.
var target *node
var useNextSibling = (n.parent.childIndex(n) == 0)
if useNextSibling {
target = n.nextSibling()
} else {
target = n.prevSibling()
}
// If both this node and the target node are too small then merge them.
if useNextSibling {
// Reparent all child nodes being moved.
for _, inode := range target.inodes {
if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent.removeChild(child)
child.parent = n
child.parent.children = append(child.parent.children, child)
}
}
// Copy over inodes from target and remove target.
n.inodes = append(n.inodes, target.inodes...)
n.parent.del(target.key)
n.parent.removeChild(target)
delete(n.bucket.nodes, target.pgid)
target.free()
} else {
// Reparent all child nodes being moved.
for _, inode := range n.inodes {
if child, ok := n.bucket.nodes[inode.pgid]; ok {
child.parent.removeChild(child)
child.parent = target
child.parent.children = append(child.parent.children, child)
}
}
// Copy over inodes to target and remove node.
target.inodes = append(target.inodes, n.inodes...)
n.parent.del(n.key)
n.parent.removeChild(n)
delete(n.bucket.nodes, n.pgid)
n.free()
}
// Either this node or the target node was deleted from the parent so rebalance it.
n.parent.rebalance()
}
// removeChild removes a node from the list of in-memory children.
// This does not affect the inodes.
func (n *node) removeChild(target *node) {
for i, child := range n.children {
if child == target {
n.children = append(n.children[:i], n.children[i+1:]...)
return
}
}
}
// dereference causes the node to copy all its inode key/value references to heap memory.
// This is required when the mmap is reallocated so inodes are not pointing to stale data.
func (n *node) dereference() {
if n.key != nil {
key := make([]byte, len(n.key))
copy(key, n.key)
n.key = key
_assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node")
}
for i := range n.inodes {
inode := &n.inodes[i]
key := make([]byte, len(inode.key))
copy(key, inode.key)
inode.key = key
_assert(len(inode.key) > 0, "dereference: zero-length inode key")
value := make([]byte, len(inode.value))
copy(value, inode.value)
inode.value = value
}
// Recursively dereference children.
for _, child := range n.children {
child.dereference()
}
// Update statistics.
n.bucket.tx.stats.NodeDeref++
}
// free adds the node's underlying page to the freelist.
func (n *node) free() {
if n.pgid != 0 {
n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid))
n.pgid = 0
}
}
// dump writes the contents of the node to STDERR for debugging purposes.
/*
func (n *node) dump() {
// Write node header.
var typ = "branch"
if n.isLeaf {
typ = "leaf"
}
warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes))
// Write out abbreviated version of each item.
for _, item := range n.inodes {
if n.isLeaf {
if item.flags&bucketLeafFlag != 0 {
bucket := (*bucket)(unsafe.Pointer(&item.value[0]))
warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root)
} else {
warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4))
}
} else {
warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid)
}
}
warn("")
}
*/
type nodes []*node
func (s nodes) Len() int { return len(s) }
func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 }
// inode represents an internal node inside of a node.
// It can be used to point to elements in a page or point
// to an element which hasn't been added to a page yet.
type inode struct {
flags uint32
pgid pgid
key []byte
value []byte
}
type inodes []inode

vendor/github.com/boltdb/bolt/page.go generated vendored Normal file

@ -0,0 +1,197 @@
package bolt
import (
"fmt"
"os"
"sort"
"unsafe"
)
const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr))
const minKeysPerPage = 2
const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{}))
const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{}))
const (
branchPageFlag = 0x01
leafPageFlag = 0x02
metaPageFlag = 0x04
freelistPageFlag = 0x10
)
const (
bucketLeafFlag = 0x01
)
type pgid uint64
type page struct {
id pgid
flags uint16
count uint16
overflow uint32
ptr uintptr
}
// typ returns a human readable page type string used for debugging.
func (p *page) typ() string {
if (p.flags & branchPageFlag) != 0 {
return "branch"
} else if (p.flags & leafPageFlag) != 0 {
return "leaf"
} else if (p.flags & metaPageFlag) != 0 {
return "meta"
} else if (p.flags & freelistPageFlag) != 0 {
return "freelist"
}
return fmt.Sprintf("unknown<%02x>", p.flags)
}
// meta returns a pointer to the metadata section of the page.
func (p *page) meta() *meta {
return (*meta)(unsafe.Pointer(&p.ptr))
}
// leafPageElement retrieves the leaf node by index
func (p *page) leafPageElement(index uint16) *leafPageElement {
n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index]
return n
}
// leafPageElements retrieves a list of leaf nodes.
func (p *page) leafPageElements() []leafPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
// branchPageElement retrieves the branch node by index
func (p *page) branchPageElement(index uint16) *branchPageElement {
return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index]
}
// branchPageElements retrieves a list of branch nodes.
func (p *page) branchPageElements() []branchPageElement {
if p.count == 0 {
return nil
}
return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:]
}
// dump writes n bytes of the page to STDERR as hex output.
func (p *page) hexdump(n int) {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n]
fmt.Fprintf(os.Stderr, "%x\n", buf)
}
type pages []*page
func (s pages) Len() int { return len(s) }
func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s pages) Less(i, j int) bool { return s[i].id < s[j].id }
// branchPageElement represents a node on a branch page.
type branchPageElement struct {
pos uint32
ksize uint32
pgid pgid
}
// key returns a byte slice of the node key.
func (n *branchPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize]
}
// leafPageElement represents a node on a leaf page.
type leafPageElement struct {
flags uint32
pos uint32
ksize uint32
vsize uint32
}
// key returns a byte slice of the node key.
func (n *leafPageElement) key() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize]
}
// value returns a byte slice of the node value.
func (n *leafPageElement) value() []byte {
buf := (*[maxAllocSize]byte)(unsafe.Pointer(n))
return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize]
}
// PageInfo represents human readable information about a page.
type PageInfo struct {
ID int
Type string
Count int
OverflowCount int
}
type pgids []pgid
func (s pgids) Len() int { return len(s) }
func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s pgids) Less(i, j int) bool { return s[i] < s[j] }
// merge returns the sorted union of a and b.
func (a pgids) merge(b pgids) pgids {
// Return the opposite slice if one is nil.
if len(a) == 0 {
return b
}
if len(b) == 0 {
return a
}
merged := make(pgids, len(a)+len(b))
mergepgids(merged, a, b)
return merged
}
// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
if len(dst) < len(a)+len(b) {
panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
}
// Copy in the opposite slice if one is nil.
if len(a) == 0 {
copy(dst, b)
return
}
if len(b) == 0 {
copy(dst, a)
return
}
// Merged will hold all elements from both lists.
merged := dst[:0]
// Assign lead to the slice with a lower starting value, follow to the higher value.
lead, follow := a, b
if b[0] < a[0] {
lead, follow = b, a
}
// Continue while there are elements in the lead.
for len(lead) > 0 {
// Merge largest prefix of lead that is ahead of follow[0].
n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
merged = append(merged, lead[:n]...)
if n >= len(lead) {
break
}
// Swap lead and follow.
lead, follow = follow, lead[n:]
}
// Append what's left in follow.
_ = append(merged, follow...)
}
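// Worked example (illustrative): merging a = [1 3 9] with b = [2 4 5]
// starts with lead = a (since 1 < 2), copies [1], swaps to lead = [2 4 5]
// and copies [2], then [3], then [4 5], and finally appends the remaining
// follow = [9], leaving dst = [1 2 3 4 5 9].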

vendor/github.com/boltdb/bolt/tx.go generated vendored Normal file

@ -0,0 +1,684 @@
package bolt
import (
"fmt"
"io"
"os"
"sort"
"strings"
"time"
"unsafe"
)
// txid represents the internal transaction identifier.
type txid uint64
// Tx represents a read-only or read/write transaction on the database.
// Read-only transactions can be used for retrieving values for keys and creating cursors.
// Read/write transactions can create and remove buckets and create and remove keys.
//
// IMPORTANT: You must commit or rollback transactions when you are done with
// them. Pages can not be reclaimed by the writer until no more transactions
// are using them. A long running read transaction can cause the database to
// quickly grow.
type Tx struct {
writable bool
managed bool
db *DB
meta *meta
root Bucket
pages map[pgid]*page
stats TxStats
commitHandlers []func()
// WriteFlag specifies the flag for write-related methods like WriteTo().
// Tx opens the database file with the specified flag to copy the data.
//
// By default, the flag is unset, which works well for mostly in-memory
// workloads. For databases that are much larger than available RAM,
// set the flag to syscall.O_DIRECT to avoid thrashing the page cache.
WriteFlag int
}
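// The managed helpers (DB.Update and DB.View) call Commit or Rollback for
// you. When driving a Tx manually via DB.Begin, the usual pattern is a
// deferred Rollback, which becomes a harmless ErrTxClosed once Commit has
// succeeded (a sketch; bucket name illustrative):
//
//	tx, err := db.Begin(true)
//	if err != nil {
//		return err
//	}
//	defer tx.Rollback()
//	if _, err := tx.CreateBucketIfNotExists([]byte("widgets")); err != nil {
//		return err
//	}
//	return tx.Commit()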
// init initializes the transaction.
func (tx *Tx) init(db *DB) {
tx.db = db
tx.pages = nil
// Copy the meta page since it can be changed by the writer.
tx.meta = &meta{}
db.meta().copy(tx.meta)
// Copy over the root bucket.
tx.root = newBucket(tx)
tx.root.bucket = &bucket{}
*tx.root.bucket = tx.meta.root
// Increment the transaction id and add a page cache for writable transactions.
if tx.writable {
tx.pages = make(map[pgid]*page)
tx.meta.txid += txid(1)
}
}
// ID returns the transaction id.
func (tx *Tx) ID() int {
return int(tx.meta.txid)
}
// DB returns a reference to the database that created the transaction.
func (tx *Tx) DB() *DB {
return tx.db
}
// Size returns current database size in bytes as seen by this transaction.
func (tx *Tx) Size() int64 {
return int64(tx.meta.pgid) * int64(tx.db.pageSize)
}
// Writable returns whether the transaction can perform write operations.
func (tx *Tx) Writable() bool {
return tx.writable
}
// Cursor creates a cursor associated with the root bucket.
// All items in the cursor will return a nil value because all root bucket keys point to buckets.
// The cursor is only valid as long as the transaction is open.
// Do not use a cursor after the transaction is closed.
func (tx *Tx) Cursor() *Cursor {
return tx.root.Cursor()
}
// Stats retrieves a copy of the current transaction statistics.
func (tx *Tx) Stats() TxStats {
return tx.stats
}
// Bucket retrieves a bucket by name.
// Returns nil if the bucket does not exist.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) Bucket(name []byte) *Bucket {
return tx.root.Bucket(name)
}
// CreateBucket creates a new bucket.
// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) {
return tx.root.CreateBucket(name)
}
// CreateBucketIfNotExists creates a new bucket if it doesn't already exist.
// Returns an error if the bucket name is blank, or if the bucket name is too long.
// The bucket instance is only valid for the lifetime of the transaction.
func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) {
return tx.root.CreateBucketIfNotExists(name)
}
// DeleteBucket deletes a bucket.
// Returns an error if the bucket cannot be found or if the key represents a non-bucket value.
func (tx *Tx) DeleteBucket(name []byte) error {
return tx.root.DeleteBucket(name)
}
// ForEach executes a function for each bucket in the root.
// If the provided function returns an error then the iteration is stopped and
// the error is returned to the caller.
func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error {
return tx.root.ForEach(func(k, v []byte) error {
if err := fn(k, tx.root.Bucket(k)); err != nil {
return err
}
return nil
})
}
// OnCommit adds a handler function to be executed after the transaction successfully commits.
func (tx *Tx) OnCommit(fn func()) {
tx.commitHandlers = append(tx.commitHandlers, fn)
}
// Commit writes all changes to disk and updates the meta page.
// Returns an error if a disk write error occurs, or if Commit is
// called on a read-only transaction.
func (tx *Tx) Commit() error {
_assert(!tx.managed, "managed tx commit not allowed")
if tx.db == nil {
return ErrTxClosed
} else if !tx.writable {
return ErrTxNotWritable
}
// TODO(benbjohnson): Use vectorized I/O to write out dirty pages.
// Rebalance nodes which have had deletions.
var startTime = time.Now()
tx.root.rebalance()
if tx.stats.Rebalance > 0 {
tx.stats.RebalanceTime += time.Since(startTime)
}
// spill data onto dirty pages.
startTime = time.Now()
if err := tx.root.spill(); err != nil {
tx.rollback()
return err
}
tx.stats.SpillTime += time.Since(startTime)
// Free the old root bucket.
tx.meta.root.root = tx.root.root
opgid := tx.meta.pgid
// Free the freelist and allocate new pages for it. This will overestimate
// the size of the freelist but not underestimate the size (which would be bad).
tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist))
p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1)
if err != nil {
tx.rollback()
return err
}
if err := tx.db.freelist.write(p); err != nil {
tx.rollback()
return err
}
tx.meta.freelist = p.id
// If the high water mark has moved up then attempt to grow the database.
if tx.meta.pgid > opgid {
if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil {
tx.rollback()
return err
}
}
// Write dirty pages to disk.
startTime = time.Now()
if err := tx.write(); err != nil {
tx.rollback()
return err
}
// If strict mode is enabled then perform a consistency check.
// Only the first consistency error is reported in the panic.
if tx.db.StrictMode {
ch := tx.Check()
var errs []string
for {
err, ok := <-ch
if !ok {
break
}
errs = append(errs, err.Error())
}
if len(errs) > 0 {
panic("check fail: " + strings.Join(errs, "\n"))
}
}
// Write meta to disk.
if err := tx.writeMeta(); err != nil {
tx.rollback()
return err
}
tx.stats.WriteTime += time.Since(startTime)
// Finalize the transaction.
tx.close()
// Execute commit handlers now that the locks have been removed.
for _, fn := range tx.commitHandlers {
fn()
}
return nil
}
// Rollback closes the transaction and ignores all previous updates. Read-only
// transactions must be rolled back and not committed.
func (tx *Tx) Rollback() error {
_assert(!tx.managed, "managed tx rollback not allowed")
if tx.db == nil {
return ErrTxClosed
}
tx.rollback()
return nil
}
func (tx *Tx) rollback() {
if tx.db == nil {
return
}
if tx.writable {
tx.db.freelist.rollback(tx.meta.txid)
tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist))
}
tx.close()
}
func (tx *Tx) close() {
if tx.db == nil {
return
}
if tx.writable {
// Grab freelist stats.
var freelistFreeN = tx.db.freelist.free_count()
var freelistPendingN = tx.db.freelist.pending_count()
var freelistAlloc = tx.db.freelist.size()
// Remove transaction ref & writer lock.
tx.db.rwtx = nil
tx.db.rwlock.Unlock()
// Merge statistics.
tx.db.statlock.Lock()
tx.db.stats.FreePageN = freelistFreeN
tx.db.stats.PendingPageN = freelistPendingN
tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize
tx.db.stats.FreelistInuse = freelistAlloc
tx.db.stats.TxStats.add(&tx.stats)
tx.db.statlock.Unlock()
} else {
tx.db.removeTx(tx)
}
// Clear all references.
tx.db = nil
tx.meta = nil
tx.root = Bucket{tx: tx}
tx.pages = nil
}
// Copy writes the entire database to a writer.
// This function exists for backwards compatibility. Use WriteTo() instead.
func (tx *Tx) Copy(w io.Writer) error {
_, err := tx.WriteTo(w)
return err
}
// WriteTo writes the entire database to a writer.
// If err == nil then exactly tx.Size() bytes will be written into the writer.
func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) {
// Attempt to open reader with WriteFlag
f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0)
if err != nil {
return 0, err
}
defer func() { _ = f.Close() }()
// Generate a meta page. We use the same page data for both meta pages.
buf := make([]byte, tx.db.pageSize)
page := (*page)(unsafe.Pointer(&buf[0]))
page.flags = metaPageFlag
*page.meta() = *tx.meta
// Write meta 0.
page.id = 0
page.meta().checksum = page.meta().sum64()
nn, err := w.Write(buf)
n += int64(nn)
if err != nil {
return n, fmt.Errorf("meta 0 copy: %s", err)
}
// Write meta 1 with a lower transaction id.
page.id = 1
page.meta().txid -= 1
page.meta().checksum = page.meta().sum64()
nn, err = w.Write(buf)
n += int64(nn)
if err != nil {
return n, fmt.Errorf("meta 1 copy: %s", err)
}
// Move past the meta pages in the file.
if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil {
return n, fmt.Errorf("seek: %s", err)
}
// Copy data pages.
wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2))
n += wn
if err != nil {
return n, err
}
return n, f.Close()
}
// CopyFile copies the entire database to file at the given path.
// A reader transaction is maintained during the copy so it is safe to continue
// using the database while a copy is in progress.
func (tx *Tx) CopyFile(path string, mode os.FileMode) error {
f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode)
if err != nil {
return err
}
err = tx.Copy(f)
if err != nil {
_ = f.Close()
return err
}
return f.Close()
}
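// Because CopyFile runs under the calling transaction, a consistent hot
// backup while the database stays in use is just (a sketch; the backup
// path is illustrative):
//
//	err := db.View(func(tx *bolt.Tx) error {
//		return tx.CopyFile("/tmp/backup.db", 0600)
//	})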
// Check performs several consistency checks on the database for this transaction.
// An error is returned if any inconsistency is found.
//
// It can be safely run concurrently on a writable transaction. However, this
// incurs a high cost for large databases and databases with a lot of subbuckets
// because of caching. This overhead can be removed if running on a read-only
// transaction, however, it is not safe to execute other writer transactions at
// the same time.
func (tx *Tx) Check() <-chan error {
ch := make(chan error)
go tx.check(ch)
return ch
}
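// Check streams findings rather than returning a single error, so callers
// drain the channel until it is closed (a sketch):
//
//	for err := range tx.Check() {
//		log.Println("consistency error:", err)
//	}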
func (tx *Tx) check(ch chan error) {
// Check if any pages are double freed.
freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}
freed[id] = true
}
// Track every reachable page.
reachable := make(map[pgid]*page)
reachable[0] = tx.page(0) // meta0
reachable[1] = tx.page(1) // meta1
for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ {
reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist)
}
// Recursively check buckets.
tx.checkBucket(&tx.root, reachable, freed, ch)
// Ensure all pages below high water mark are either reachable or freed.
for i := pgid(0); i < tx.meta.pgid; i++ {
_, isReachable := reachable[i]
if !isReachable && !freed[i] {
ch <- fmt.Errorf("page %d: unreachable unfreed", int(i))
}
}
// Close the channel to signal completion.
close(ch)
}
func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) {
// Ignore inline buckets.
if b.root == 0 {
return
}
// Check every page used by this bucket.
b.tx.forEachPage(b.root, 0, func(p *page, _ int) {
if p.id > tx.meta.pgid {
ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid))
}
// Ensure each page is only referenced once.
for i := pgid(0); i <= pgid(p.overflow); i++ {
var id = p.id + i
if _, ok := reachable[id]; ok {
ch <- fmt.Errorf("page %d: multiple references", int(id))
}
reachable[id] = p
}
// We should only encounter un-freed leaf and branch pages.
if freed[p.id] {
ch <- fmt.Errorf("page %d: reachable freed", int(p.id))
} else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 {
ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ())
}
})
// Check each bucket within this bucket.
_ = b.ForEach(func(k, v []byte) error {
if child := b.Bucket(k); child != nil {
tx.checkBucket(child, reachable, freed, ch)
}
return nil
})
}
// allocate returns a contiguous block of memory starting at a given page.
func (tx *Tx) allocate(count int) (*page, error) {
p, err := tx.db.allocate(count)
if err != nil {
return nil, err
}
// Save to our page cache.
tx.pages[p.id] = p
// Update statistics.
tx.stats.PageCount++
tx.stats.PageAlloc += count * tx.db.pageSize
return p, nil
}
// write writes any dirty pages to disk.
func (tx *Tx) write() error {
// Sort pages by id.
pages := make(pages, 0, len(tx.pages))
for _, p := range tx.pages {
pages = append(pages, p)
}
// Clear out page cache early.
tx.pages = make(map[pgid]*page)
sort.Sort(pages)
// Write pages to disk in order.
for _, p := range pages {
size := (int(p.overflow) + 1) * tx.db.pageSize
offset := int64(p.id) * int64(tx.db.pageSize)
// Write out page in "max allocation" sized chunks.
ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p))
for {
// Limit our write to our max allocation size.
sz := size
if sz > maxAllocSize-1 {
sz = maxAllocSize - 1
}
// Write chunk to disk.
buf := ptr[:sz]
if _, err := tx.db.ops.writeAt(buf, offset); err != nil {
return err
}
// Update statistics.
tx.stats.Write++
// Exit inner for loop if we've written all the chunks.
size -= sz
if size == 0 {
break
}
// Otherwise move offset forward and move pointer to next chunk.
offset += int64(sz)
ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz]))
}
}
// Ignore file sync if flag is set on DB.
if !tx.db.NoSync || IgnoreNoSync {
if err := fdatasync(tx.db); err != nil {
return err
}
}
// Put small pages back to page pool.
for _, p := range pages {
// Ignore page sizes over 1 page.
// These are allocated using make() instead of the page pool.
if int(p.overflow) != 0 {
continue
}
buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize]
// See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1
for i := range buf {
buf[i] = 0
}
tx.db.pagePool.Put(buf)
}
return nil
}
// writeMeta writes the meta to the disk.
func (tx *Tx) writeMeta() error {
// Create a temporary buffer for the meta page.
buf := make([]byte, tx.db.pageSize)
p := tx.db.pageInBuffer(buf, 0)
tx.meta.write(p)
// Write the meta page to file.
if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil {
return err
}
if !tx.db.NoSync || IgnoreNoSync {
if err := fdatasync(tx.db); err != nil {
return err
}
}
// Update statistics.
tx.stats.Write++
return nil
}
// page returns a reference to the page with a given id.
// If page has been written to then a temporary buffered page is returned.
func (tx *Tx) page(id pgid) *page {
// Check the dirty pages first.
if tx.pages != nil {
if p, ok := tx.pages[id]; ok {
return p
}
}
// Otherwise return directly from the mmap.
return tx.db.page(id)
}
// forEachPage iterates over every page within a given page and executes a function.
func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
p := tx.page(pgid)
// Execute function.
fn(p, depth)
// Recursively loop over children.
if (p.flags & branchPageFlag) != 0 {
for i := 0; i < int(p.count); i++ {
elem := p.branchPageElement(uint16(i))
tx.forEachPage(elem.pgid, depth+1, fn)
}
}
}
// Page returns page information for a given page number.
// This is only safe for concurrent use when used by a writable transaction.
func (tx *Tx) Page(id int) (*PageInfo, error) {
if tx.db == nil {
return nil, ErrTxClosed
} else if pgid(id) >= tx.meta.pgid {
return nil, nil
}
// Build the page info.
p := tx.db.page(pgid(id))
info := &PageInfo{
ID: id,
Count: int(p.count),
OverflowCount: int(p.overflow),
}
// Determine the type (or if it's free).
if tx.db.freelist.freed(pgid(id)) {
info.Type = "free"
} else {
info.Type = p.typ()
}
return info, nil
}
// TxStats represents statistics about the actions performed by the transaction.
type TxStats struct {
// Page statistics.
PageCount int // number of page allocations
PageAlloc int // total bytes allocated
// Cursor statistics.
CursorCount int // number of cursors created
// Node statistics
NodeCount int // number of node allocations
NodeDeref int // number of node dereferences
// Rebalance statistics.
Rebalance int // number of node rebalances
RebalanceTime time.Duration // total time spent rebalancing
// Split/Spill statistics.
Split int // number of nodes split
Spill int // number of nodes spilled
SpillTime time.Duration // total time spent spilling
// Write statistics.
Write int // number of writes performed
WriteTime time.Duration // total time spent writing to disk
}
func (s *TxStats) add(other *TxStats) {
s.PageCount += other.PageCount
s.PageAlloc += other.PageAlloc
s.CursorCount += other.CursorCount
s.NodeCount += other.NodeCount
s.NodeDeref += other.NodeDeref
s.Rebalance += other.Rebalance
s.RebalanceTime += other.RebalanceTime
s.Split += other.Split
s.Spill += other.Spill
s.SpillTime += other.SpillTime
s.Write += other.Write
s.WriteTime += other.WriteTime
}
// Sub calculates and returns the difference between two sets of transaction stats.
// This is useful when obtaining stats at two different points in time and
// you need the performance counters that occurred within that time span.
func (s *TxStats) Sub(other *TxStats) TxStats {
var diff TxStats
diff.PageCount = s.PageCount - other.PageCount
diff.PageAlloc = s.PageAlloc - other.PageAlloc
diff.CursorCount = s.CursorCount - other.CursorCount
diff.NodeCount = s.NodeCount - other.NodeCount
diff.NodeDeref = s.NodeDeref - other.NodeDeref
diff.Rebalance = s.Rebalance - other.Rebalance
diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
diff.Split = s.Split - other.Split
diff.Spill = s.Spill - other.Spill
diff.SpillTime = s.SpillTime - other.SpillTime
diff.Write = s.Write - other.Write
diff.WriteTime = s.WriteTime - other.WriteTime
return diff
}
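A minimal consumer sketch, not part of the vendored file: take two TxStats snapshots and Sub them to get the counters accumulated within the window. It assumes the DB type exposes a Stats() accessor whose result embeds a TxStats field, as upstream bolt does; the helper name, file path, and interval are illustrative.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/boltdb/bolt"
)

// sampleWriteStats reports the writes performed against db during a
// ten-second window by diffing two TxStats snapshots.
func sampleWriteStats(db *bolt.DB) {
	prev := db.Stats().TxStats
	time.Sleep(10 * time.Second)
	curr := db.Stats().TxStats
	diff := curr.Sub(&prev) // only the activity between the two snapshots
	fmt.Printf("writes: %d in %v\n", diff.Write, diff.WriteTime)
}

func main() {
	db, err := bolt.Open("stats.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	sampleWriteStats(db)
}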

275
vendor/github.com/caarlos0/env/env.go generated vendored Normal file
View File

@@ -0,0 +1,275 @@
package env
import (
"errors"
"os"
"reflect"
"strconv"
"strings"
"time"
)
var (
// ErrNotAStructPtr is returned if you pass something that is not a pointer to a
// struct to Parse.
ErrNotAStructPtr = errors.New("Expected a pointer to a Struct")
// ErrUnsupportedType is returned if the struct field type is not supported by env.
ErrUnsupportedType = errors.New("Type is not supported")
// ErrUnsupportedSliceType is returned if the slice element type is not supported by env.
ErrUnsupportedSliceType = errors.New("Unsupported slice type")
// Friendly names for reflect types
sliceOfInts = reflect.TypeOf([]int(nil))
sliceOfInt64s = reflect.TypeOf([]int64(nil))
sliceOfStrings = reflect.TypeOf([]string(nil))
sliceOfBools = reflect.TypeOf([]bool(nil))
sliceOfFloat32s = reflect.TypeOf([]float32(nil))
sliceOfFloat64s = reflect.TypeOf([]float64(nil))
)
// Parse parses a struct containing `env` tags and loads its values from
// environment variables.
func Parse(v interface{}) error {
ptrRef := reflect.ValueOf(v)
if ptrRef.Kind() != reflect.Ptr {
return ErrNotAStructPtr
}
ref := ptrRef.Elem()
if ref.Kind() != reflect.Struct {
return ErrNotAStructPtr
}
return doParse(ref)
}
func doParse(ref reflect.Value) error {
refType := ref.Type()
for i := 0; i < refType.NumField(); i++ {
value, err := get(refType.Field(i))
if err != nil {
return err
}
if value == "" {
continue
}
if err := set(ref.Field(i), refType.Field(i), value); err != nil {
return err
}
}
return nil
}
func get(field reflect.StructField) (string, error) {
var (
val string
err error
)
key, opts := parseKeyForOption(field.Tag.Get("env"))
defaultValue := field.Tag.Get("envDefault")
val = getOr(key, defaultValue)
if len(opts) > 0 {
for _, opt := range opts {
// The only option supported is "required".
switch opt {
case "":
break
case "required":
val, err = getRequired(key)
default:
err = errors.New("Env tag option " + opt + " not supported.")
}
}
}
return val, err
}
// split the env tag's key into the expected key and desired option, if any.
func parseKeyForOption(key string) (string, []string) {
opts := strings.Split(key, ",")
return opts[0], opts[1:]
}
func getRequired(key string) (string, error) {
if value := os.Getenv(key); value != "" {
return value, nil
}
// We do not use fmt.Errorf to avoid another import.
return "", errors.New("Required environment variable " + key + " is not set")
}
func getOr(key, defaultValue string) string {
value := os.Getenv(key)
if value != "" {
return value
}
return defaultValue
}
func set(field reflect.Value, refType reflect.StructField, value string) error {
switch field.Kind() {
case reflect.Slice:
separator := refType.Tag.Get("envSeparator")
return handleSlice(field, value, separator)
case reflect.String:
field.SetString(value)
case reflect.Bool:
bvalue, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(bvalue)
case reflect.Int:
intValue, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return err
}
field.SetInt(intValue)
case reflect.Float32:
v, err := strconv.ParseFloat(value, 32)
if err != nil {
return err
}
field.SetFloat(v)
case reflect.Float64:
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
field.Set(reflect.ValueOf(v))
case reflect.Int64:
if refType.Type.String() == "time.Duration" {
dValue, err := time.ParseDuration(value)
if err != nil {
return err
}
field.Set(reflect.ValueOf(dValue))
} else {
intValue, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return err
}
field.SetInt(intValue)
}
default:
return ErrUnsupportedType
}
return nil
}
func handleSlice(field reflect.Value, value, separator string) error {
if separator == "" {
separator = ","
}
splitData := strings.Split(value, separator)
switch field.Type() {
case sliceOfStrings:
field.Set(reflect.ValueOf(splitData))
case sliceOfInts:
intData, err := parseInts(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(intData))
case sliceOfInt64s:
int64Data, err := parseInt64s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(int64Data))
case sliceOfFloat32s:
data, err := parseFloat32s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(data))
case sliceOfFloat64s:
data, err := parseFloat64s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(data))
case sliceOfBools:
boolData, err := parseBools(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(boolData))
default:
return ErrUnsupportedSliceType
}
return nil
}
func parseInts(data []string) ([]int, error) {
var intSlice []int
for _, v := range data {
intValue, err := strconv.ParseInt(v, 10, 32)
if err != nil {
return nil, err
}
intSlice = append(intSlice, int(intValue))
}
return intSlice, nil
}
func parseInt64s(data []string) ([]int64, error) {
var intSlice []int64
for _, v := range data {
intValue, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
intSlice = append(intSlice, int64(intValue))
}
return intSlice, nil
}
func parseFloat32s(data []string) ([]float32, error) {
var float32Slice []float32
for _, v := range data {
data, err := strconv.ParseFloat(v, 32)
if err != nil {
return nil, err
}
float32Slice = append(float32Slice, float32(data))
}
return float32Slice, nil
}
func parseFloat64s(data []string) ([]float64, error) {
var float64Slice []float64
for _, v := range data {
data, err := strconv.ParseFloat(v, 64)
if err != nil {
return nil, err
}
float64Slice = append(float64Slice, float64(data))
}
return float64Slice, nil
}
func parseBools(data []string) ([]bool, error) {
var boolSlice []bool
for _, v := range data {
bvalue, err := strconv.ParseBool(v)
if err != nil {
return nil, err
}
boolSlice = append(boolSlice, bvalue)
}
return boolSlice, nil
}
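A hedged usage sketch for the parser above. The struct tags are exactly the ones the code reads (env, envDefault, envSeparator, and the "required" option); the field names and environment variables are illustrative.

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/caarlos0/env"
)

type config struct {
	Port    int           `env:"PORT" envDefault:"8080"`
	Secret  string        `env:"SECRET,required"` // Parse fails if SECRET is unset
	Timeout time.Duration `env:"TIMEOUT" envDefault:"5s"`
	Hosts   []string      `env:"HOSTS" envSeparator:";"`
}

func main() {
	var cfg config
	if err := env.Parse(&cfg); err != nil { // must be a pointer to a struct
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg)
}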

35
vendor/github.com/emirpasic/gods/containers/containers.go generated vendored Normal file
View File

@@ -0,0 +1,35 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package containers provides core interfaces and functions for data structures.
//
// Container is the base interface for all data structures to implement.
//
// Iterators provide stateful iterators.
//
// Enumerable provides Ruby inspired (each, select, map, find, any?, etc.) container functions.
//
// Serialization provides serializers (marshalers) and deserializers (unmarshalers).
package containers
import "github.com/emirpasic/gods/utils"
// Container is base interface that all data structures implement.
type Container interface {
Empty() bool
Size() int
Clear()
Values() []interface{}
}
// GetSortedValues returns the container's elements sorted with respect to the passed comparator.
// It does not affect the ordering of elements within the container.
func GetSortedValues(container Container, comparator utils.Comparator) []interface{} {
values := container.Values()
if len(values) < 2 {
return values
}
utils.Sort(values, comparator)
return values
}
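A short sketch of the helper above: any Container works, here the red-black tree vendored later in this commit. The keys and values are illustrative.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/containers"
	"github.com/emirpasic/gods/trees/redblacktree"
	"github.com/emirpasic/gods/utils"
)

func main() {
	tree := redblacktree.NewWithIntComparator()
	tree.Put(2, "b")
	tree.Put(1, "a")
	// Sorts a copy of the values; the tree itself is left untouched.
	values := containers.GetSortedValues(tree, utils.StringComparator)
	fmt.Println(values) // [a b]
}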

61
vendor/github.com/emirpasic/gods/containers/enumerable.go generated vendored Normal file
View File

@@ -0,0 +1,61 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// EnumerableWithIndex provides functions for ordered containers whose values can be fetched by an index.
type EnumerableWithIndex interface {
// Each calls the given function once for each element, passing that element's index and value.
Each(func(index int, value interface{}))
// Map invokes the given function once for each element and returns a
// container containing the values returned by the given function.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Map(func(index int, value interface{}) interface{}) Container
// Select returns a new container containing all elements for which the given function returns a true value.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Select(func(index int, value interface{}) bool) Container
// Any passes each element of the container to the given function and
// returns true if the function ever returns true for any element.
Any(func(index int, value interface{}) bool) bool
// All passes each element of the container to the given function and
// returns true if the function returns true for all elements.
All(func(index int, value interface{}) bool) bool
// Find passes each element of the container to the given function and returns
// the first (index,value) for which the function is true, or (-1,nil) if no
// element matches the criteria.
Find(func(index int, value interface{}) bool) (int, interface{})
}
// EnumerableWithKey provides functions for ordered containers whose elements are key/value pairs.
type EnumerableWithKey interface {
// Each calls the given function once for each element, passing that element's key and value.
Each(func(key interface{}, value interface{}))
// Map invokes the given function once for each element and returns a container
// containing the values returned by the given function as key/value pairs.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Map(func(key interface{}, value interface{}) (interface{}, interface{})) Container
// Select returns a new container containing all elements for which the given function returns a true value.
// TODO need help on how to enforce this in containers (don't want to type assert when chaining)
// Select(func(key interface{}, value interface{}) bool) Container
// Any passes each element of the container to the given function and
// returns true if the function ever returns true for any element.
Any(func(key interface{}, value interface{}) bool) bool
// All passes each element of the container to the given function and
// returns true if the function returns true for all elements.
All(func(key interface{}, value interface{}) bool) bool
// Find passes each element of the container to the given function and returns
// the first (key,value) for which the function is true, or (nil,nil) if no
// element matches the criteria.
Find(func(key interface{}, value interface{}) bool) (interface{}, interface{})
}
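A hedged sketch, not part of gods itself, of a minimal slice-backed type satisfying EnumerableWithIndex, mainly to make the Find contract concrete: it returns (-1,nil) when nothing matches.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/containers"
)

type enumSlice []interface{}

func (s enumSlice) Each(f func(index int, value interface{})) {
	for i, v := range s {
		f(i, v)
	}
}

func (s enumSlice) Any(f func(index int, value interface{}) bool) bool {
	for i, v := range s {
		if f(i, v) {
			return true
		}
	}
	return false
}

func (s enumSlice) All(f func(index int, value interface{}) bool) bool {
	for i, v := range s {
		if !f(i, v) {
			return false
		}
	}
	return true
}

func (s enumSlice) Find(f func(index int, value interface{}) bool) (int, interface{}) {
	for i, v := range s {
		if f(i, v) {
			return i, v
		}
	}
	return -1, nil // no element matched
}

var _ containers.EnumerableWithIndex = enumSlice(nil) // compile-time check

func main() {
	s := enumSlice{1, 2, 3}
	i, v := s.Find(func(_ int, v interface{}) bool { return v.(int) > 1 })
	fmt.Println(i, v) // 1 2
}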

109
vendor/github.com/emirpasic/gods/containers/iterator.go generated vendored Normal file
View File

@@ -0,0 +1,109 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// IteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
type IteratorWithIndex interface {
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's index and value can be retrieved by Index() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
Next() bool
// Value returns the current element's value.
// Does not modify the state of the iterator.
Value() interface{}
// Index returns the current element's index.
// Does not modify the state of the iterator.
Index() int
// Begin resets the iterator to its initial state (one-before-first).
// Call Next() to fetch the first element if any.
Begin()
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
First() bool
}
// IteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
type IteratorWithKey interface {
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
Next() bool
// Value returns the current element's value.
// Does not modify the state of the iterator.
Value() interface{}
// Key returns the current element's key.
// Does not modify the state of the iterator.
Key() interface{}
// Begin resets the iterator to its initial state (one-before-first).
// Call Next() to fetch the first element if any.
Begin()
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
First() bool
}
// ReverseIteratorWithIndex is a stateful iterator for ordered containers whose values can be fetched by an index.
//
// Essentially it is the same as IteratorWithIndex, but provides additional:
//
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
//
// End() function to move the iterator past the last element (one-past-the-end).
type ReverseIteratorWithIndex interface {
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
Prev() bool
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
End()
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's index and value can be retrieved by Index() and Value().
// Modifies the state of the iterator.
Last() bool
IteratorWithIndex
}
// ReverseIteratorWithKey is a stateful iterator for ordered containers whose elements are key value pairs.
//
// Essentially it is the same as IteratorWithKey, but provides additional:
//
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
type ReverseIteratorWithKey interface {
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
Prev() bool
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
End()
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
Last() bool
IteratorWithKey
}

17
vendor/github.com/emirpasic/gods/containers/serialization.go generated vendored Normal file
View File

@@ -0,0 +1,17 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package containers
// JSONSerializer provides JSON serialization
type JSONSerializer interface {
// ToJSON outputs the JSON representation of the container's elements.
ToJSON() ([]byte, error)
}
// JSONDeserializer provides JSON deserialization
type JSONDeserializer interface {
// FromJSON populates the container's elements from the input JSON representation.
FromJSON([]byte) error
}

156
vendor/github.com/emirpasic/gods/trees/redblacktree/iterator.go generated vendored Normal file
View File

@@ -0,0 +1,156 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package redblacktree
import "github.com/emirpasic/gods/containers"
func assertIteratorImplementation() {
var _ containers.ReverseIteratorWithKey = (*Iterator)(nil)
}
// Iterator holding the iterator's state
type Iterator struct {
tree *Tree
node *Node
position position
}
type position byte
const (
begin, between, end position = 0, 1, 2
)
// Iterator returns a stateful iterator whose elements are key/value pairs.
func (tree *Tree) Iterator() Iterator {
return Iterator{tree: tree, node: nil, position: begin}
}
// Next moves the iterator to the next element and returns true if there was a next element in the container.
// If Next() returns true, then next element's key and value can be retrieved by Key() and Value().
// If Next() was called for the first time, then it will point the iterator to the first element if it exists.
// Modifies the state of the iterator.
func (iterator *Iterator) Next() bool {
if iterator.position == end {
goto end
}
if iterator.position == begin {
left := iterator.tree.Left()
if left == nil {
goto end
}
iterator.node = left
goto between
}
if iterator.node.Right != nil {
iterator.node = iterator.node.Right
for iterator.node.Left != nil {
iterator.node = iterator.node.Left
}
goto between
}
if iterator.node.Parent != nil {
node := iterator.node
for iterator.node.Parent != nil {
iterator.node = iterator.node.Parent
if iterator.tree.Comparator(node.Key, iterator.node.Key) <= 0 {
goto between
}
}
}
end:
iterator.node = nil
iterator.position = end
return false
between:
iterator.position = between
return true
}
// Prev moves the iterator to the previous element and returns true if there was a previous element in the container.
// If Prev() returns true, then previous element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Prev() bool {
if iterator.position == begin {
goto begin
}
if iterator.position == end {
right := iterator.tree.Right()
if right == nil {
goto begin
}
iterator.node = right
goto between
}
if iterator.node.Left != nil {
iterator.node = iterator.node.Left
for iterator.node.Right != nil {
iterator.node = iterator.node.Right
}
goto between
}
if iterator.node.Parent != nil {
node := iterator.node
for iterator.node.Parent != nil {
iterator.node = iterator.node.Parent
if iterator.tree.Comparator(node.Key, iterator.node.Key) >= 0 {
goto between
}
}
}
begin:
iterator.node = nil
iterator.position = begin
return false
between:
iterator.position = between
return true
}
// Value returns the current element's value.
// Does not modify the state of the iterator.
func (iterator *Iterator) Value() interface{} {
return iterator.node.Value
}
// Key returns the current element's key.
// Does not modify the state of the iterator.
func (iterator *Iterator) Key() interface{} {
return iterator.node.Key
}
// Begin resets the iterator to its initial state (one-before-first).
// Call Next() to fetch the first element if any.
func (iterator *Iterator) Begin() {
iterator.node = nil
iterator.position = begin
}
// End moves the iterator past the last element (one-past-the-end).
// Call Prev() to fetch the last element if any.
func (iterator *Iterator) End() {
iterator.node = nil
iterator.position = end
}
// First moves the iterator to the first element and returns true if there was a first element in the container.
// If First() returns true, then first element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) First() bool {
iterator.Begin()
return iterator.Next()
}
// Last moves the iterator to the last element and returns true if there was a last element in the container.
// If Last() returns true, then last element's key and value can be retrieved by Key() and Value().
// Modifies the state of the iterator.
func (iterator *Iterator) Last() bool {
iterator.End()
return iterator.Prev()
}
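A usage sketch for the iterator above: a forward pass leaves the iterator one-past-the-end, so a reverse pass with Prev() can follow directly. Keys and values are illustrative.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/redblacktree"
)

func main() {
	tree := redblacktree.NewWithIntComparator()
	tree.Put(1, "x")
	tree.Put(2, "y")

	it := tree.Iterator()
	for it.Next() { // forward, ascending key order
		fmt.Println(it.Key(), it.Value())
	}
	for it.Prev() { // reverse, descending key order
		fmt.Println(it.Key(), it.Value())
	}
}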

522
vendor/github.com/emirpasic/gods/trees/redblacktree/redblacktree.go generated vendored Normal file
View File

@@ -0,0 +1,522 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package redblacktree implements a red-black tree.
//
// Used by TreeSet and TreeMap.
//
// Structure is not thread safe.
//
// References: http://en.wikipedia.org/wiki/Red%E2%80%93black_tree
package redblacktree
import (
"fmt"
"github.com/emirpasic/gods/trees"
"github.com/emirpasic/gods/utils"
)
func assertTreeImplementation() {
var _ trees.Tree = (*Tree)(nil)
}
type color bool
const (
black, red color = true, false
)
// Tree holds elements of the red-black tree
type Tree struct {
Root *Node
size int
Comparator utils.Comparator
}
// Node is a single element within the tree
type Node struct {
Key interface{}
Value interface{}
color color
Left *Node
Right *Node
Parent *Node
}
// NewWith instantiates a red-black tree with the custom comparator.
func NewWith(comparator utils.Comparator) *Tree {
return &Tree{Comparator: comparator}
}
// NewWithIntComparator instantiates a red-black tree with the IntComparator, i.e. keys are of type int.
func NewWithIntComparator() *Tree {
return &Tree{Comparator: utils.IntComparator}
}
// NewWithStringComparator instantiates a red-black tree with the StringComparator, i.e. keys are of type string.
func NewWithStringComparator() *Tree {
return &Tree{Comparator: utils.StringComparator}
}
// Put inserts node into the tree.
// Key should adhere to the comparator's type assertion, otherwise method panics.
func (tree *Tree) Put(key interface{}, value interface{}) {
insertedNode := &Node{Key: key, Value: value, color: red}
if tree.Root == nil {
tree.Root = insertedNode
} else {
node := tree.Root
loop := true
for loop {
compare := tree.Comparator(key, node.Key)
switch {
case compare == 0:
node.Key = key
node.Value = value
return
case compare < 0:
if node.Left == nil {
node.Left = insertedNode
loop = false
} else {
node = node.Left
}
case compare > 0:
if node.Right == nil {
node.Right = insertedNode
loop = false
} else {
node = node.Right
}
}
}
insertedNode.Parent = node
}
tree.insertCase1(insertedNode)
tree.size++
}
// Get searches for the node in the tree by key and returns its value, or nil if the key is not found in the tree.
// The second return parameter is true if the key was found, otherwise false.
// Key should adhere to the comparator's type assertion, otherwise method panics.
func (tree *Tree) Get(key interface{}) (value interface{}, found bool) {
node := tree.lookup(key)
if node != nil {
return node.Value, true
}
return nil, false
}
// Remove removes the node from the tree by key.
// Key should adhere to the comparator's type assertion, otherwise method panics.
func (tree *Tree) Remove(key interface{}) {
var child *Node
node := tree.lookup(key)
if node == nil {
return
}
if node.Left != nil && node.Right != nil {
pred := node.Left.maximumNode()
node.Key = pred.Key
node.Value = pred.Value
node = pred
}
if node.Left == nil || node.Right == nil {
if node.Right == nil {
child = node.Left
} else {
child = node.Right
}
if node.color == black {
node.color = nodeColor(child)
tree.deleteCase1(node)
}
tree.replaceNode(node, child)
if node.Parent == nil && child != nil {
child.color = black
}
}
tree.size--
}
// Empty returns true if the tree does not contain any nodes.
func (tree *Tree) Empty() bool {
return tree.size == 0
}
// Size returns number of nodes in the tree.
func (tree *Tree) Size() int {
return tree.size
}
// Keys returns all keys in-order.
func (tree *Tree) Keys() []interface{} {
keys := make([]interface{}, tree.size)
it := tree.Iterator()
for i := 0; it.Next(); i++ {
keys[i] = it.Key()
}
return keys
}
// Values returns all values in-order based on the key.
func (tree *Tree) Values() []interface{} {
values := make([]interface{}, tree.size)
it := tree.Iterator()
for i := 0; it.Next(); i++ {
values[i] = it.Value()
}
return values
}
// Left returns the left-most (min) node or nil if tree is empty.
func (tree *Tree) Left() *Node {
var parent *Node
current := tree.Root
for current != nil {
parent = current
current = current.Left
}
return parent
}
// Right returns the right-most (max) node or nil if tree is empty.
func (tree *Tree) Right() *Node {
var parent *Node
current := tree.Root
for current != nil {
parent = current
current = current.Right
}
return parent
}
// Floor finds the floor node of the input key, returning the floor node or nil if no floor is found.
// The second return parameter is true if a floor was found, otherwise false.
//
// The floor node is defined as the largest node that is smaller than or equal to the given node.
// A floor node may not be found, either because the tree is empty, or because
// all nodes in the tree are larger than the given node.
//
// Key should adhere to the comparator's type assertion, otherwise method panics.
func (tree *Tree) Floor(key interface{}) (floor *Node, found bool) {
found = false
node := tree.Root
for node != nil {
compare := tree.Comparator(key, node.Key)
switch {
case compare == 0:
return node, true
case compare < 0:
node = node.Left
case compare > 0:
floor, found = node, true
node = node.Right
}
}
if found {
return floor, true
}
return nil, false
}
// Ceiling finds the ceiling node of the input key, returning the ceiling node or nil if no ceiling is found.
// The second return parameter is true if a ceiling was found, otherwise false.
//
// The ceiling node is defined as the smallest node that is larger than or equal to the given node.
// A ceiling node may not be found, either because the tree is empty, or because
// all nodes in the tree are smaller than the given node.
//
// Key should adhere to the comparator's type assertion, otherwise method panics.
func (tree *Tree) Ceiling(key interface{}) (ceiling *Node, found bool) {
found = false
node := tree.Root
for node != nil {
compare := tree.Comparator(key, node.Key)
switch {
case compare == 0:
return node, true
case compare < 0:
ceiling, found = node, true
node = node.Left
case compare > 0:
node = node.Right
}
}
if found {
return ceiling, true
}
return nil, false
}
// Clear removes all nodes from the tree.
func (tree *Tree) Clear() {
tree.Root = nil
tree.size = 0
}
// String returns a string representation of the container.
func (tree *Tree) String() string {
str := "RedBlackTree\n"
if !tree.Empty() {
output(tree.Root, "", true, &str)
}
return str
}
func (node *Node) String() string {
return fmt.Sprintf("%v", node.Key)
}
func output(node *Node, prefix string, isTail bool, str *string) {
if node.Right != nil {
newPrefix := prefix
if isTail {
newPrefix += "│ "
} else {
newPrefix += " "
}
output(node.Right, newPrefix, false, str)
}
*str += prefix
if isTail {
*str += "└── "
} else {
*str += "┌── "
}
*str += node.String() + "\n"
if node.Left != nil {
newPrefix := prefix
if isTail {
newPrefix += " "
} else {
newPrefix += "│ "
}
output(node.Left, newPrefix, true, str)
}
}
func (tree *Tree) lookup(key interface{}) *Node {
node := tree.Root
for node != nil {
compare := tree.Comparator(key, node.Key)
switch {
case compare == 0:
return node
case compare < 0:
node = node.Left
case compare > 0:
node = node.Right
}
}
return nil
}
func (node *Node) grandparent() *Node {
if node != nil && node.Parent != nil {
return node.Parent.Parent
}
return nil
}
func (node *Node) uncle() *Node {
if node == nil || node.Parent == nil || node.Parent.Parent == nil {
return nil
}
return node.Parent.sibling()
}
func (node *Node) sibling() *Node {
if node == nil || node.Parent == nil {
return nil
}
if node == node.Parent.Left {
return node.Parent.Right
}
return node.Parent.Left
}
func (tree *Tree) rotateLeft(node *Node) {
right := node.Right
tree.replaceNode(node, right)
node.Right = right.Left
if right.Left != nil {
right.Left.Parent = node
}
right.Left = node
node.Parent = right
}
func (tree *Tree) rotateRight(node *Node) {
left := node.Left
tree.replaceNode(node, left)
node.Left = left.Right
if left.Right != nil {
left.Right.Parent = node
}
left.Right = node
node.Parent = left
}
func (tree *Tree) replaceNode(old *Node, new *Node) {
if old.Parent == nil {
tree.Root = new
} else {
if old == old.Parent.Left {
old.Parent.Left = new
} else {
old.Parent.Right = new
}
}
if new != nil {
new.Parent = old.Parent
}
}
func (tree *Tree) insertCase1(node *Node) {
if node.Parent == nil {
node.color = black
} else {
tree.insertCase2(node)
}
}
func (tree *Tree) insertCase2(node *Node) {
if nodeColor(node.Parent) == black {
return
}
tree.insertCase3(node)
}
func (tree *Tree) insertCase3(node *Node) {
uncle := node.uncle()
if nodeColor(uncle) == red {
node.Parent.color = black
uncle.color = black
node.grandparent().color = red
tree.insertCase1(node.grandparent())
} else {
tree.insertCase4(node)
}
}
func (tree *Tree) insertCase4(node *Node) {
grandparent := node.grandparent()
if node == node.Parent.Right && node.Parent == grandparent.Left {
tree.rotateLeft(node.Parent)
node = node.Left
} else if node == node.Parent.Left && node.Parent == grandparent.Right {
tree.rotateRight(node.Parent)
node = node.Right
}
tree.insertCase5(node)
}
func (tree *Tree) insertCase5(node *Node) {
node.Parent.color = black
grandparent := node.grandparent()
grandparent.color = red
if node == node.Parent.Left && node.Parent == grandparent.Left {
tree.rotateRight(grandparent)
} else if node == node.Parent.Right && node.Parent == grandparent.Right {
tree.rotateLeft(grandparent)
}
}
func (node *Node) maximumNode() *Node {
if node == nil {
return nil
}
for node.Right != nil {
node = node.Right
}
return node
}
func (tree *Tree) deleteCase1(node *Node) {
if node.Parent == nil {
return
}
tree.deleteCase2(node)
}
func (tree *Tree) deleteCase2(node *Node) {
sibling := node.sibling()
if nodeColor(sibling) == red {
node.Parent.color = red
sibling.color = black
if node == node.Parent.Left {
tree.rotateLeft(node.Parent)
} else {
tree.rotateRight(node.Parent)
}
}
tree.deleteCase3(node)
}
func (tree *Tree) deleteCase3(node *Node) {
sibling := node.sibling()
if nodeColor(node.Parent) == black &&
nodeColor(sibling) == black &&
nodeColor(sibling.Left) == black &&
nodeColor(sibling.Right) == black {
sibling.color = red
tree.deleteCase1(node.Parent)
} else {
tree.deleteCase4(node)
}
}
func (tree *Tree) deleteCase4(node *Node) {
sibling := node.sibling()
if nodeColor(node.Parent) == red &&
nodeColor(sibling) == black &&
nodeColor(sibling.Left) == black &&
nodeColor(sibling.Right) == black {
sibling.color = red
node.Parent.color = black
} else {
tree.deleteCase5(node)
}
}
func (tree *Tree) deleteCase5(node *Node) {
sibling := node.sibling()
if node == node.Parent.Left &&
nodeColor(sibling) == black &&
nodeColor(sibling.Left) == red &&
nodeColor(sibling.Right) == black {
sibling.color = red
sibling.Left.color = black
tree.rotateRight(sibling)
} else if node == node.Parent.Right &&
nodeColor(sibling) == black &&
nodeColor(sibling.Right) == red &&
nodeColor(sibling.Left) == black {
sibling.color = red
sibling.Right.color = black
tree.rotateLeft(sibling)
}
tree.deleteCase6(node)
}
func (tree *Tree) deleteCase6(node *Node) {
sibling := node.sibling()
sibling.color = nodeColor(node.Parent)
node.Parent.color = black
if node == node.Parent.Left && nodeColor(sibling.Right) == red {
sibling.Right.color = black
tree.rotateLeft(node.Parent)
} else if nodeColor(sibling.Left) == red {
sibling.Left.color = black
tree.rotateRight(node.Parent)
}
}
func nodeColor(node *Node) color {
if node == nil {
return black
}
return node.color
}
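A short usage sketch for the tree above, exercising Put, Get, Floor, Ceiling, and Remove; the keys and values are illustrative.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/redblacktree"
)

func main() {
	tree := redblacktree.NewWithIntComparator()
	tree.Put(10, "ten")
	tree.Put(20, "twenty")

	if v, found := tree.Get(10); found {
		fmt.Println(v) // ten
	}
	if floor, found := tree.Floor(15); found {
		fmt.Println(floor.Key) // 10, the largest key <= 15
	}
	if ceiling, found := tree.Ceiling(15); found {
		fmt.Println(ceiling.Key) // 20, the smallest key >= 15
	}
	tree.Remove(10)
	fmt.Println(tree.Size()) // 1
}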

39
vendor/github.com/emirpasic/gods/trees/redblacktree/serialization.go generated vendored Normal file
View File

@@ -0,0 +1,39 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package redblacktree
import (
"encoding/json"
"github.com/emirpasic/gods/containers"
"github.com/emirpasic/gods/utils"
)
func assertSerializationImplementation() {
var _ containers.JSONSerializer = (*Tree)(nil)
var _ containers.JSONDeserializer = (*Tree)(nil)
}
// ToJSON outputs the JSON representation of the tree's elements.
func (tree *Tree) ToJSON() ([]byte, error) {
elements := make(map[string]interface{})
it := tree.Iterator()
for it.Next() {
elements[utils.ToString(it.Key())] = it.Value()
}
return json.Marshal(&elements)
}
// FromJSON populates the tree's elements from the input JSON representation.
func (tree *Tree) FromJSON(data []byte) error {
elements := make(map[string]interface{})
err := json.Unmarshal(data, &elements)
if err == nil {
tree.Clear()
for key, value := range elements {
tree.Put(key, value)
}
}
return err
}
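A round-trip sketch for the two methods above. Since JSON object keys are strings, the rebuilt tree should use a string comparator, and unmarshaled numbers come back as float64.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/redblacktree"
)

func main() {
	src := redblacktree.NewWithStringComparator()
	src.Put("a", 1)

	data, err := src.ToJSON()
	if err != nil {
		panic(err)
	}

	dst := redblacktree.NewWithStringComparator()
	if err := dst.FromJSON(data); err != nil {
		panic(err)
	}
	fmt.Println(string(data), dst.Size()) // {"a":1} 1
}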

21
vendor/github.com/emirpasic/gods/trees/trees.go generated vendored Normal file
View File

@@ -0,0 +1,21 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package trees provides an abstract Tree interface.
//
// In computer science, a tree is a widely used abstract data type (ADT) or data structure implementing this ADT that simulates a hierarchical tree structure, with a root value and subtrees of children with a parent node, represented as a set of linked nodes.
//
// Reference: https://en.wikipedia.org/wiki/Tree_%28data_structure%29
package trees
import "github.com/emirpasic/gods/containers"
// Tree is the interface that all trees implement.
type Tree interface {
containers.Container
// Empty() bool
// Size() int
// Clear()
// Values() []interface{}
}
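A hedged compile-time sketch, mirroring the assertTreeImplementation helper seen earlier: the blank assignment fails to compile if *redblacktree.Tree ever stops satisfying this interface.

package main

import (
	"github.com/emirpasic/gods/trees"
	"github.com/emirpasic/gods/trees/redblacktree"
)

var _ trees.Tree = (*redblacktree.Tree)(nil) // interface satisfaction check

func main() {}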

251
vendor/github.com/emirpasic/gods/utils/comparator.go generated vendored Normal file
View File

@@ -0,0 +1,251 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utils
import "time"
// Comparator will make a type assertion (see IntComparator for an example),
// which will panic if a or b are not of the asserted type.
//
// Should return a number:
// negative , if a < b
// zero , if a == b
// positive , if a > b
type Comparator func(a, b interface{}) int
// StringComparator provides a fast comparison on strings
func StringComparator(a, b interface{}) int {
s1 := a.(string)
s2 := b.(string)
min := len(s2)
if len(s1) < len(s2) {
min = len(s1)
}
diff := 0
for i := 0; i < min && diff == 0; i++ {
diff = int(s1[i]) - int(s2[i])
}
if diff == 0 {
diff = len(s1) - len(s2)
}
if diff < 0 {
return -1
}
if diff > 0 {
return 1
}
return 0
}
// IntComparator provides a basic comparison on int
func IntComparator(a, b interface{}) int {
aAsserted := a.(int)
bAsserted := b.(int)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int8Comparator provides a basic comparison on int8
func Int8Comparator(a, b interface{}) int {
aAsserted := a.(int8)
bAsserted := b.(int8)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int16Comparator provides a basic comparison on int16
func Int16Comparator(a, b interface{}) int {
aAsserted := a.(int16)
bAsserted := b.(int16)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int32Comparator provides a basic comparison on int32
func Int32Comparator(a, b interface{}) int {
aAsserted := a.(int32)
bAsserted := b.(int32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Int64Comparator provides a basic comparison on int64
func Int64Comparator(a, b interface{}) int {
aAsserted := a.(int64)
bAsserted := b.(int64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UIntComparator provides a basic comparison on uint
func UIntComparator(a, b interface{}) int {
aAsserted := a.(uint)
bAsserted := b.(uint)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt8Comparator provides a basic comparison on uint8
func UInt8Comparator(a, b interface{}) int {
aAsserted := a.(uint8)
bAsserted := b.(uint8)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt16Comparator provides a basic comparison on uint16
func UInt16Comparator(a, b interface{}) int {
aAsserted := a.(uint16)
bAsserted := b.(uint16)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt32Comparator provides a basic comparison on uint32
func UInt32Comparator(a, b interface{}) int {
aAsserted := a.(uint32)
bAsserted := b.(uint32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// UInt64Comparator provides a basic comparison on uint64
func UInt64Comparator(a, b interface{}) int {
aAsserted := a.(uint64)
bAsserted := b.(uint64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Float32Comparator provides a basic comparison on float32
func Float32Comparator(a, b interface{}) int {
aAsserted := a.(float32)
bAsserted := b.(float32)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// Float64Comparator provides a basic comparison on float64
func Float64Comparator(a, b interface{}) int {
aAsserted := a.(float64)
bAsserted := b.(float64)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// ByteComparator provides a basic comparison on byte
func ByteComparator(a, b interface{}) int {
aAsserted := a.(byte)
bAsserted := b.(byte)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// RuneComparator provides a basic comparison on rune
func RuneComparator(a, b interface{}) int {
aAsserted := a.(rune)
bAsserted := b.(rune)
switch {
case aAsserted > bAsserted:
return 1
case aAsserted < bAsserted:
return -1
default:
return 0
}
}
// TimeComparator provides a basic comparison on time.Time
func TimeComparator(a, b interface{}) int {
aAsserted := a.(time.Time)
bAsserted := b.(time.Time)
switch {
case aAsserted.After(bAsserted):
return 1
case aAsserted.Before(bAsserted):
return -1
default:
return 0
}
}
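A sketch of a user-defined Comparator over a custom type, following the negative/zero/positive contract documented above; the user type and its fields are illustrative.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/trees/redblacktree"
)

type user struct {
	name string
	age  int
}

// byAge orders users by age; like the built-in comparators it panics
// if a or b is not of the asserted type.
func byAge(a, b interface{}) int {
	ua := a.(user)
	ub := b.(user)
	switch {
	case ua.age > ub.age:
		return 1
	case ua.age < ub.age:
		return -1
	default:
		return 0
	}
}

func main() {
	tree := redblacktree.NewWith(byAge) // keys ordered by the custom comparator
	tree.Put(user{"bob", 31}, "b")
	tree.Put(user{"alice", 26}, "a")
	fmt.Println(tree.Keys()) // [{alice 26} {bob 31}]
}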

29
vendor/github.com/emirpasic/gods/utils/sort.go generated vendored Normal file
View File

@@ -0,0 +1,29 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package utils
import "sort"
// Sort sorts values (in-place) with respect to the given comparator.
//
// Uses Go's sort (hybrid of quicksort for large and then insertion sort for smaller slices).
func Sort(values []interface{}, comparator Comparator) {
sort.Sort(sortable{values, comparator})
}
type sortable struct {
values []interface{}
comparator Comparator
}
func (s sortable) Len() int {
return len(s.values)
}
func (s sortable) Swap(i, j int) {
s.values[i], s.values[j] = s.values[j], s.values[i]
}
func (s sortable) Less(i, j int) bool {
return s.comparator(s.values[i], s.values[j]) < 0
}
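A minimal sketch of the helper above: the sort happens in place on the []interface{} slice.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/utils"
)

func main() {
	values := []interface{}{3, 1, 2}
	utils.Sort(values, utils.IntComparator) // sorts values in place
	fmt.Println(values)                     // [1 2 3]
}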

47
vendor/github.com/emirpasic/gods/utils/utils.go generated vendored Normal file
View File

@@ -0,0 +1,47 @@
// Copyright (c) 2015, Emir Pasic. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package utils provides common utility functions.
//
// Provided functionalities:
// - sorting
// - comparators
package utils
import (
"fmt"
"strconv"
)
// ToString converts a value to string.
func ToString(value interface{}) string {
switch value.(type) {
case string:
return value.(string)
case int8:
return strconv.FormatInt(int64(value.(int8)), 10)
case int16:
return strconv.FormatInt(int64(value.(int16)), 10)
case int32:
return strconv.FormatInt(int64(value.(int32)), 10)
case int64:
return strconv.FormatInt(int64(value.(int64)), 10)
case uint8:
return strconv.FormatUint(uint64(value.(uint8)), 10)
case uint16:
return strconv.FormatUint(uint64(value.(uint16)), 10)
case uint32:
return strconv.FormatUint(uint64(value.(uint32)), 10)
case uint64:
return strconv.FormatUint(uint64(value.(uint64)), 10)
case float32:
return strconv.FormatFloat(float64(value.(float32)), 'g', -1, 32) // bitSize 32 so float32 values round-trip cleanly
case float64:
return strconv.FormatFloat(float64(value.(float64)), 'g', -1, 64)
case bool:
return strconv.FormatBool(value.(bool))
default:
return fmt.Sprintf("%+v", value)
}
}
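A brief usage sketch: ToString special-cases the numeric and bool types listed above and falls back to fmt's %+v formatting for everything else.

package main

import (
	"fmt"

	"github.com/emirpasic/gods/utils"
)

func main() {
	fmt.Println(utils.ToString(int64(42)))          // 42
	fmt.Println(utils.ToString(3.14))               // 3.14
	fmt.Println(utils.ToString(true))               // true
	fmt.Println(utils.ToString(struct{ X int }{1})) // {X:1}
}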