fix quic manually

Cadey Ratio 2018-01-03 11:29:07 -08:00
parent 792c0a9e81
commit 88a375f174
83 changed files with 608 additions and 841 deletions

Gopkg.lock (generated): 24 changed lines
View File

@ -76,8 +76,8 @@
[[projects]]
name = "github.com/asdine/storm"
packages = [".","codec","codec/json","index","internal","q"]
revision = "255212403bcca439778718edf5e2d3d50744eca3"
version = "v1.1.0"
revision = "dbd37722730b6cb703b5bd825c3f142d87358525"
version = "v2.0.0"
[[projects]]
name = "github.com/aws/aws-sdk-go"
@ -101,7 +101,7 @@
branch = "master"
name = "github.com/bifurcation/mint"
packages = [".","syntax"]
revision = "d5dd291d400abddb674b2b2acfee6881c1c8f8e5"
revision = "f699e8d03646cb8e6e15410ced7bff37fcf8dddd"
[[projects]]
name = "github.com/blang/semver"
@ -109,12 +109,6 @@
revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f"
version = "v3.5.1"
[[projects]]
name = "github.com/boltdb/bolt"
packages = ["."]
revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8"
version = "v1.3.1"
[[projects]]
branch = "master"
name = "github.com/brandur/simplebox"
@ -127,6 +121,12 @@
revision = "7cd7992b3bc86f920394f8de92c13900da1a46b7"
version = "v3.2.0"
[[projects]]
name = "github.com/coreos/bbolt"
packages = ["."]
revision = "583e8937c61f1af6513608ccc75c97b6abdf4ff9"
version = "v1.3.0"
[[projects]]
branch = "master"
name = "github.com/dgryski/go-failure"
@ -325,10 +325,10 @@
revision = "d2f86524cced5186554df90d92529757d22c1cb6"
[[projects]]
branch = "master"
name = "github.com/magefile/mage"
packages = ["mg","types"]
revision = "63768081a3236a7c6c53ef72e402ae1fe1664b61"
revision = "ab3ca2f6f85577d7ec82e0a6df721147a2e737f9"
version = "v2.0.1"
[[projects]]
name = "github.com/mattn/go-isatty"
@ -525,6 +525,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "207902d7a1c84bb5bca1b004cbb19f67e06f2231f9ad48b2129698bf47f115a7"
inputs-digest = "dd3b3341036bb95c8a409729fa12b897e6515c32cfaae8218cf27d60ad1a3b07"
solver-name = "gps-cdcl"
solver-version = 1

View File

@ -39,7 +39,7 @@
[[constraint]]
name = "github.com/asdine/storm"
version = "1.1.0"
version = "2.0.0"
[[constraint]]
branch = "master"
@ -47,7 +47,7 @@
[[constraint]]
name = "github.com/caarlos0/env"
version = "3.0.0"
version = "3.2.0"
[[constraint]]
branch = "master"
@ -69,6 +69,10 @@
name = "github.com/google/gops"
version = "0.3.2"
[[constraint]]
name = "github.com/hashicorp/terraform"
version = "0.11.1"
[[constraint]]
name = "github.com/joho/godotenv"
version = "1.2.0"
@ -82,8 +86,12 @@
name = "github.com/kr/pretty"
[[constraint]]
branch = "master"
name = "github.com/lucas-clemente/quic-go"
version = "0.6.0"
[[constraint]]
name = "github.com/magefile/mage"
version = "2.0.1"
[[constraint]]
branch = "master"
@ -103,7 +111,7 @@
[[constraint]]
name = "github.com/xtaci/kcp-go"
version = "3.19.0"
version = "3.23.0"
[[constraint]]
name = "github.com/xtaci/smux"
@ -111,7 +119,7 @@
[[constraint]]
name = "go.uber.org/atomic"
version = "1.2.0"
version = "1.3.1"
[[constraint]]
branch = "master"
@ -123,12 +131,8 @@
[[constraint]]
name = "google.golang.org/grpc"
version = "1.6.0"
version = "1.9.0"
[[constraint]]
name = "gopkg.in/alecthomas/kingpin.v2"
version = "2.2.5"
[[constraint]]
branch = "master"
name = "github.com/mmatczuk/go-http-tunnel"
version = "2.2.6"

View File

@ -4,10 +4,9 @@ before_install:
- go get github.com/stretchr/testify
go:
- 1.5
- 1.6
- 1.7
- 1.8
- 1.9
- tip
script:

View File

@ -1,11 +1,10 @@
# Storm
[![Join the chat at https://gitter.im/asdine/storm](https://badges.gitter.im/asdine/storm.svg)](https://gitter.im/asdine/storm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://travis-ci.org/asdine/storm.svg)](https://travis-ci.org/asdine/storm)
[![GoDoc](https://godoc.org/github.com/asdine/storm?status.svg)](https://godoc.org/github.com/asdine/storm)
[![Go Report Card](https://goreportcard.com/badge/github.com/asdine/storm)](https://goreportcard.com/report/github.com/asdine/storm)
Storm is a simple and powerful toolkit for [BoltDB](https://github.com/boltdb/bolt). Basically, Storm provides indexes, a wide range of methods to store and fetch data, an advanced query system, and much more.
Storm is a simple and powerful toolkit for [BoltDB](https://github.com/coreos/bbolt). Basically, Storm provides indexes, a wide range of methods to store and fetch data, an advanced query system, and much more.
In addition to the examples below, see also the [examples in the GoDoc](https://godoc.org/github.com/asdine/storm#pkg-examples).
@ -43,7 +42,6 @@ In addition to the examples below, see also the [examples in the GoDoc](https://
- [Node options](#node-options)
- [Simple Key/Value store](#simple-keyvalue-store)
- [BoltDB](#boltdb)
- [Migrations](#migrations)
- [License](#license)
- [Credits](#credits)
@ -62,6 +60,7 @@ import "github.com/asdine/storm"
## Open a database
Quick way of opening a database
```go
db, err := storm.Open("my.db")
@ -103,11 +102,11 @@ type Base struct {
}
type User struct {
Base `storm:"inline"`
Group string `storm:"index"`
Email string `storm:"unique"`
Name string
CreatedAt time.Time `storm:"index"`
Base `storm:"inline"`
Group string `storm:"index"`
Email string `storm:"unique"`
Name string
CreatedAt time.Time `storm:"index"`
}
```
@ -142,11 +141,11 @@ Storm can auto increment integer values so you don't have to worry about that wh
```go
type Product struct {
Pk int `storm:"id,increment"` // primary key with auto increment
Name string
IntegerField uint64 `storm:"increment"`
IndexedIntegerField uint32 `storm:"index,increment"`
UniqueIntegerField int16 `storm:"unique,increment=100"` // the starting value can be set
Pk int `storm:"id,increment"` // primary key with auto increment
Name string
IntegerField uint64 `storm:"increment"`
IndexedIntegerField uint32 `storm:"index,increment"`
UniqueIntegerField int16 `storm:"unique,increment=100"` // the starting value can be set
}
p := Product{Name: "Vacuum Cleaner"}
@ -175,7 +174,7 @@ fmt.Println(p.UniqueIntegerField)
### Simple queries
Any object can be fetched, indexed or not. Storm uses indexes when available, otherwhise it uses the [query system](#advanced-queries).
Any object can be fetched, indexed or not. Storm uses indexes when available, otherwise it uses the [query system](#advanced-queries).
#### Fetch one object
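A minimal sketch, assuming the `User` type and the `db` opened in the examples above (the field name and value are illustrative):
```go
var user User
// One fetches a single record by the value of an indexed (or unique) field.
if err := db.One("Email", "john@provider.com", &user); err != nil {
	// err is storm.ErrNotFound when no record matches
	return err
}
```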
@ -433,6 +432,7 @@ if err != nil {
return tx.Commit()
```
### Options
Storm options are functions that can be passed when constructing your Storm instance. You can pass any number of options.
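For example, a minimal sketch combining options that appear elsewhere in this README:
```go
// Pass any number of options to Open; each one configures the resulting DB.
db, err := storm.Open("my.db",
	storm.Codec(gob.Codec), // use gob instead of the default JSON codec
	storm.Batch(),          // enable batch mode for read-write transactions
)
if err != nil {
	// handle the error
}
defer db.Close()
```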
@ -462,12 +462,12 @@ These can be used by importing the relevant package and use that codec to config
```go
import (
"github.com/asdine/storm"
"github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json"
"github.com/asdine/storm/codec/sereal"
"github.com/asdine/storm/codec/protobuf"
"github.com/asdine/storm/codec/msgpack"
"github.com/asdine/storm"
"github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json"
"github.com/asdine/storm/codec/sereal"
"github.com/asdine/storm/codec/protobuf"
"github.com/asdine/storm/codec/msgpack"
)
var gobDb, _ = storm.Open("gob.db", storm.Codec(gob.Codec))
@ -490,7 +490,7 @@ db := storm.Open("my.db", storm.UseDB(bDB))
#### Batch mode
Batch mode can be enabled to speed up concurrent writes (see [Batch read-write transactions](https://github.com/boltdb/bolt#batch-read-write-transactions))
Batch mode can be enabled to speed up concurrent writes (see [Batch read-write transactions](https://github.com/coreos/bbolt#batch-read-write-transactions))
```go
db := storm.Open("my.db", storm.Batch())
@ -546,16 +546,19 @@ n := db.From("my-node")
```
Give a bolt.Tx transaction to the Node
```go
n = n.WithTransaction(tx)
```
Enable batch mode
```go
n = n.WithBatch(true)
```
Use a Codec
```go
n = n.WithCodec(gob.Codec)
```
@ -566,6 +569,7 @@ Storm can be used as a simple, robust, key/value store that can store anything.
The key and the value can be of any type as long as the key is not a zero value.
Saving data :
```go
db.Set("logs", time.Now(), "I'm eating my breakfast man")
db.Set("sessions", bson.NewObjectId(), &someUser)
@ -576,6 +580,7 @@ db.Set("weird storage", "754-3010", map[string]interface{}{
```
Fetching data :
```go
user := User{}
db.Get("sessions", someObjectId, &user)
@ -587,6 +592,7 @@ db.Get("sessions", someObjectId, &details)
```
Deleting data :
```go
db.Delete("sessions", someObjectId)
db.Delete("weird storage", "754-3010")
@ -617,11 +623,6 @@ db.Bolt.Update(func(tx *bolt.Tx) error {
})
```
## Migrations
You can use the migration tool to migrate databases that use older version of Storm.
See this [README](https://github.com/asdine/storm-migrator) for more informations.
## License
MIT

View File

@ -7,7 +7,7 @@ import (
)
func BenchmarkFindWithIndex(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement())
db, cleanup := createDB(b)
defer cleanup()
var users []User
@ -37,7 +37,7 @@ func BenchmarkFindWithIndex(b *testing.B) {
}
func BenchmarkFindWithoutIndex(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement())
db, cleanup := createDB(b)
defer cleanup()
var users []User
@ -67,7 +67,7 @@ func BenchmarkFindWithoutIndex(b *testing.B) {
}
func BenchmarkOneWithIndex(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement())
db, cleanup := createDB(b)
defer cleanup()
var u User
@ -89,7 +89,7 @@ func BenchmarkOneWithIndex(b *testing.B) {
}
func BenchmarkOneByID(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement())
db, cleanup := createDB(b)
defer cleanup()
type User struct {
@ -120,7 +120,7 @@ func BenchmarkOneByID(b *testing.B) {
}
func BenchmarkOneWithoutIndex(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement())
db, cleanup := createDB(b)
defer cleanup()
var u User
@ -142,7 +142,7 @@ func BenchmarkOneWithoutIndex(b *testing.B) {
}
func BenchmarkSave(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement())
db, cleanup := createDB(b)
defer cleanup()
w := User{Name: "John"}

View File

@ -1,6 +1,6 @@
package storm
import "github.com/boltdb/bolt"
import "github.com/coreos/bbolt"
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.

View File

@ -16,9 +16,9 @@ func TestBucket(t *testing.T) {
t.Fatal(err)
}
require.Nil(t, db.root.GetBucket(readTx, "none"))
require.Nil(t, db.GetBucket(readTx, "none"))
b, err := db.root.CreateBucketIfNotExists(readTx, "new")
b, err := db.CreateBucketIfNotExists(readTx, "new")
// Cannot create buckets in a read transaction
require.Error(t, err)
@ -36,9 +36,9 @@ func TestBucket(t *testing.T) {
t.Fatal(err)
}
require.Nil(t, db.root.GetBucket(writeTx, "none"))
require.Nil(t, db.GetBucket(writeTx, "none"))
b, err = db.root.CreateBucketIfNotExists(writeTx, "new")
b, err = db.CreateBucketIfNotExists(writeTx, "new")
require.NoError(t, err)
require.NotNil(t, b)
@ -59,8 +59,8 @@ func TestBucket(t *testing.T) {
t.Fatal(err)
}
require.NotNil(t, db.root.GetBucket(readTx, "new"))
require.Nil(t, db.root.GetBucket(readTx, "c"))
require.NotNil(t, db.GetBucket(readTx, "new"))
require.Nil(t, db.GetBucket(readTx, "c"))
require.NotNil(t, n2.GetBucket(readTx, "c"))
readTx.Rollback()

View File

@ -43,9 +43,6 @@ var (
// ErrNotInTransaction is returned when trying to rollback or commit when not in transaction.
ErrNotInTransaction = errors.New("not in transaction")
// ErrUnAddressable is returned when a struct or an exported field of a struct is unaddressable
ErrUnAddressable = errors.New("unaddressable value")
// ErrIncompatibleValue is returned when trying to set a value with a different type than the chosen field
ErrIncompatibleValue = errors.New("incompatible value")

View File

@ -10,7 +10,8 @@ import (
"time"
"github.com/asdine/storm"
"github.com/boltdb/bolt"
"github.com/asdine/storm/codec/gob"
"github.com/coreos/bbolt"
)
func ExampleDB_Save() {
@ -18,7 +19,7 @@ func ExampleDB_Save() {
defer os.RemoveAll(dir)
type User struct {
ID int `storm:"id"`
ID int `storm:"id,increment"` // the increment tag will auto-increment integer IDs without existing values.
Group string `storm:"index"`
Email string `storm:"unique"`
Name string
@ -27,8 +28,7 @@ func ExampleDB_Save() {
}
// Open takes an optional list of options as the last argument.
// AutoIncrement will auto-increment integer IDs without existing values.
db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.AutoIncrement())
db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.Codec(gob.Codec))
defer db.Close()
user := User{
@ -497,7 +497,7 @@ func ExampleNode_RangeScan() {
}
type User struct {
ID int `storm:"id"`
ID int `storm:"id,increment"`
Group string `storm:"index"`
Email string `storm:"unique"`
Name string
@ -506,7 +506,7 @@ type User struct {
}
type Account struct {
ID int `storm:"id"`
ID int `storm:"id,increment"`
Amount int64 // amount in cents
}
@ -517,7 +517,7 @@ type Note struct {
func prepareDB() (string, *storm.DB) {
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.AutoIncrement())
db, _ := storm.Open(filepath.Join(dir, "storm.db"))
for i, name := range []string{"John", "Eric", "Dilbert"} {
email := strings.ToLower(name + "@provider.com")

View File

@ -7,7 +7,7 @@ import (
"strings"
"github.com/asdine/storm/index"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// Storm tags

View File

@ -6,11 +6,11 @@ import (
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// A Finder can fetch types from BoltDB
type Finder interface {
// A finder can fetch types from BoltDB
type finder interface {
// One returns one record by the specified index
One(fieldName string, value interface{}, to interface{}) error
@ -79,7 +79,7 @@ func (n *node) One(fieldName string, value interface{}, to interface{}) error {
return sink.flush()
}
val, err := toBytes(value, n.s.codec)
val, err := toBytes(value, n.codec)
if err != nil {
return err
}
@ -119,7 +119,7 @@ func (n *node) one(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig,
return ErrNotFound
}
return n.s.codec.Unmarshal(raw, to)
return n.codec.Unmarshal(raw, to)
}
// Find returns one or more records by the specified index
@ -164,7 +164,7 @@ func (n *node) Find(fieldName string, value interface{}, to interface{}, options
return sink.flush()
}
val, err := toBytes(value, n.s.codec)
val, err := toBytes(value, n.codec)
if err != nil {
return err
}
@ -280,7 +280,7 @@ func (n *node) allByIndex(tx *bolt.Tx, fieldName string, cfg *structConfig, ref
return ErrNotFound
}
err = n.s.codec.Unmarshal(raw, results.Index(i).Addr().Interface())
err = n.codec.Unmarshal(raw, results.Index(i).Addr().Interface())
if err != nil {
return err
}
@ -359,12 +359,12 @@ func (n *node) Range(fieldName string, min, max, to interface{}, options ...func
return sink.flush()
}
mn, err := toBytes(min, n.s.codec)
mn, err := toBytes(min, n.codec)
if err != nil {
return err
}
mx, err := toBytes(max, n.s.codec)
mx, err := toBytes(max, n.codec)
if err != nil {
return err
}
@ -450,7 +450,7 @@ func (n *node) Prefix(fieldName string, prefix string, to interface{}, options .
return sink.flush()
}
prfx, err := toBytes(prefix, n.s.codec)
prfx, err := toBytes(prefix, n.codec)
if err != nil {
return err
}

View File

@ -8,7 +8,7 @@ import (
"testing"
"time"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
"github.com/stretchr/testify/require"
)
@ -110,7 +110,7 @@ func TestFind(t *testing.T) {
}
func TestFindNil(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement())
db, cleanup := createDB(t)
defer cleanup()
type User struct {
@ -150,11 +150,11 @@ func TestFindNil(t *testing.T) {
}
func TestFindIntIndex(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement())
db, cleanup := createDB(t)
defer cleanup()
type Score struct {
ID int
ID int `storm:"increment"`
Score uint64 `storm:"index"`
}

View File

@ -4,7 +4,7 @@ import (
"bytes"
"github.com/asdine/storm/internal"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// NewListIndex loads a ListIndex

View File

@ -11,7 +11,7 @@ import (
"github.com/asdine/storm"
"github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/index"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
"github.com/stretchr/testify/require"
)

View File

@ -4,7 +4,7 @@ import (
"bytes"
"github.com/asdine/storm/internal"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// NewUniqueIndex loads a UniqueIndex

View File

@ -10,7 +10,7 @@ import (
"github.com/asdine/storm"
"github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/index"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
"github.com/stretchr/testify/require"
)

View File

@ -3,7 +3,7 @@ package internal
import (
"bytes"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// Cursor that can be reversed

vendor/github.com/asdine/storm/kv.go (generated, vendored): 18 changed lines
View File

@ -3,11 +3,11 @@ package storm
import (
"reflect"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// KeyValueStore can store and fetch values by key
type KeyValueStore interface {
// keyValueStore can store and fetch values by key
type keyValueStore interface {
// Get a value from a bucket
Get(bucketName string, key interface{}, to interface{}) error
// Set a key/value pair into a bucket
@ -22,7 +22,7 @@ type KeyValueStore interface {
// GetBytes gets a raw value from a bucket.
func (n *node) GetBytes(bucketName string, key interface{}) ([]byte, error) {
id, err := toBytes(key, n.s.codec)
id, err := toBytes(key, n.codec)
if err != nil {
return nil, err
}
@ -61,7 +61,7 @@ func (n *node) SetBytes(bucketName string, key interface{}, value []byte) error
return ErrNilParam
}
id, err := toBytes(key, n.s.codec)
id, err := toBytes(key, n.codec)
if err != nil {
return err
}
@ -94,7 +94,7 @@ func (n *node) Get(bucketName string, key interface{}, to interface{}) error {
return ErrPtrNeeded
}
id, err := toBytes(key, n.s.codec)
id, err := toBytes(key, n.codec)
if err != nil {
return err
}
@ -105,7 +105,7 @@ func (n *node) Get(bucketName string, key interface{}, to interface{}) error {
return err
}
return n.s.codec.Unmarshal(raw, to)
return n.codec.Unmarshal(raw, to)
})
}
@ -114,7 +114,7 @@ func (n *node) Set(bucketName string, key interface{}, value interface{}) error
var data []byte
var err error
if value != nil {
data, err = n.s.codec.Marshal(value)
data, err = n.codec.Marshal(value)
if err != nil {
return err
}
@ -125,7 +125,7 @@ func (n *node) Set(bucketName string, key interface{}, value interface{}) error
// Delete deletes a key from a bucket
func (n *node) Delete(bucketName string, key interface{}) error {
id, err := toBytes(key, n.s.codec)
id, err := toBytes(key, n.codec)
if err != nil {
return err
}

View File

@ -7,7 +7,7 @@ import (
"github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
"github.com/stretchr/testify/require"
)

View File

@ -3,7 +3,7 @@ package storm
import (
"reflect"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
const (

View File

@ -2,15 +2,15 @@ package storm
import (
"github.com/asdine/storm/codec"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// A Node in Storm represents the API to a BoltDB bucket.
type Node interface {
Tx
TypeStore
KeyValueStore
BucketScanner
tx
typeStore
keyValueStore
bucketScanner
// From returns a new Storm node with a new bucket root below the current.
// All DB operations on the new node will be executed relative to this bucket.
From(addend ...string) Node

View File

@ -5,7 +5,7 @@ import (
"github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
"github.com/stretchr/testify/require"
)
@ -17,13 +17,13 @@ func TestNode(t *testing.T) {
node1, ok := n1.(*node)
require.True(t, ok)
require.Equal(t, db, node1.s)
require.NotEqual(t, db.root, n1)
require.Equal(t, []string{"a"}, db.root.rootBucket)
require.Equal(t, []string{"b", "c"}, node1.rootBucket)
require.NotEqual(t, db.Node, n1)
require.Equal(t, []string{"a"}, db.Node.(*node).rootBucket)
require.Equal(t, []string{"a", "b", "c"}, node1.rootBucket)
n2 := n1.From("d", "e")
node2, ok := n2.(*node)
require.True(t, ok)
require.Equal(t, []string{"b", "c", "d", "e"}, node2.rootBucket)
require.Equal(t, []string{"a", "b", "c", "d", "e"}, node2.rootBucket)
}
func TestNodeWithTransaction(t *testing.T) {
@ -46,17 +46,90 @@ func TestNodeWithTransaction(t *testing.T) {
}
func TestNodeWithCodec(t *testing.T) {
db, cleanup := createDB(t)
defer cleanup()
t.Run("Inheritance", func(t *testing.T) {
db, cleanup := createDB(t)
defer cleanup()
n := db.From("a").(*node)
require.Equal(t, json.Codec, n.codec)
n = n.From("b", "c", "d").(*node)
require.Equal(t, json.Codec, n.codec)
n = db.WithCodec(gob.Codec).(*node)
n = n.From("e").(*node)
require.Equal(t, gob.Codec, n.codec)
o := n.From("f").WithCodec(json.Codec).(*node)
require.Equal(t, gob.Codec, n.codec)
require.Equal(t, json.Codec, o.codec)
n := db.From("a").(*node)
require.Equal(t, json.Codec, n.codec)
n = n.From("b", "c", "d").(*node)
require.Equal(t, json.Codec, n.codec)
n = db.WithCodec(gob.Codec).(*node)
n = n.From("e").(*node)
require.Equal(t, gob.Codec, n.codec)
o := n.From("f").WithCodec(json.Codec).(*node)
require.Equal(t, gob.Codec, n.codec)
require.Equal(t, json.Codec, o.codec)
})
t.Run("CodecCall", func(t *testing.T) {
db, cleanup := createDB(t)
defer cleanup()
type User struct {
ID int
Name string `storm:"index"`
}
requireBytesEqual := func(raw []byte, expected interface{}) {
var u User
err := gob.Codec.Unmarshal(raw, &u)
require.NoError(t, err)
require.Equal(t, expected, u)
}
n := db.From("a").WithCodec(gob.Codec)
err := n.Set("gobBucket", "key", &User{ID: 10, Name: "John"})
require.NoError(t, err)
b, err := n.GetBytes("gobBucket", "key")
require.NoError(t, err)
requireBytesEqual(b, User{ID: 10, Name: "John"})
id, err := toBytes(10, n.(*node).codec)
require.NoError(t, err)
err = n.Save(&User{ID: 10, Name: "John"})
require.NoError(t, err)
b, err = n.GetBytes("User", id)
require.NoError(t, err)
requireBytesEqual(b, User{ID: 10, Name: "John"})
err = n.Update(&User{ID: 10, Name: "Jack"})
require.NoError(t, err)
b, err = n.GetBytes("User", id)
require.NoError(t, err)
requireBytesEqual(b, User{ID: 10, Name: "Jack"})
err = n.UpdateField(&User{ID: 10}, "Name", "John")
require.NoError(t, err)
b, err = n.GetBytes("User", id)
require.NoError(t, err)
requireBytesEqual(b, User{ID: 10, Name: "John"})
var users []User
err = n.Find("Name", "John", &users)
require.NoError(t, err)
var user User
err = n.One("Name", "John", &user)
require.NoError(t, err)
err = n.AllByIndex("Name", &users)
require.NoError(t, err)
err = n.All(&users)
require.NoError(t, err)
err = n.Range("Name", "J", "K", &users)
require.NoError(t, err)
err = n.Prefix("Name", "J", &users)
require.NoError(t, err)
_, err = n.Count(new(User))
require.NoError(t, err)
err = n.Select().Find(&users)
require.NoError(t, err)
})
}

View File

@ -5,57 +5,48 @@ import (
"github.com/asdine/storm/codec"
"github.com/asdine/storm/index"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// BoltOptions used to pass options to BoltDB.
func BoltOptions(mode os.FileMode, options *bolt.Options) func(*DB) error {
return func(d *DB) error {
d.boltMode = mode
d.boltOptions = options
func BoltOptions(mode os.FileMode, options *bolt.Options) func(*Options) error {
return func(opts *Options) error {
opts.boltMode = mode
opts.boltOptions = options
return nil
}
}
// Codec used to set a custom encoder and decoder. The default is JSON.
func Codec(c codec.MarshalUnmarshaler) func(*DB) error {
return func(d *DB) error {
d.codec = c
func Codec(c codec.MarshalUnmarshaler) func(*Options) error {
return func(opts *Options) error {
opts.codec = c
return nil
}
}
// Batch enables the use of batch instead of update for read-write transactions.
func Batch() func(*DB) error {
return func(d *DB) error {
d.batchMode = true
return nil
}
}
// AutoIncrement used to enable bolt.NextSequence on empty integer ids.
// Deprecated: Set the increment tag to the id field instead.
func AutoIncrement() func(*DB) error {
return func(d *DB) error {
d.autoIncrement = true
func Batch() func(*Options) error {
return func(opts *Options) error {
opts.batchMode = true
return nil
}
}
// Root used to set the root bucket. See also the From method.
func Root(root ...string) func(*DB) error {
return func(d *DB) error {
d.rootBucket = root
func Root(root ...string) func(*Options) error {
return func(opts *Options) error {
opts.rootBucket = root
return nil
}
}
// UseDB allow Storm to use an existing open Bolt.DB.
// UseDB allows Storm to use an existing open Bolt.DB.
// Warning: storm.DB.Close() will close the bolt.DB instance.
func UseDB(b *bolt.DB) func(*DB) error {
return func(d *DB) error {
d.Path = b.Path()
d.Bolt = b
func UseDB(b *bolt.DB) func(*Options) error {
return func(opts *Options) error {
opts.path = b.Path()
opts.bolt = b
return nil
}
}
@ -80,3 +71,27 @@ func Reverse() func(*index.Options) {
opts.Reverse = true
}
}
// Options are used to customize the way Storm opens a database.
type Options struct {
// Handles encoding and decoding of objects
codec codec.MarshalUnmarshaler
// Bolt file mode
boltMode os.FileMode
// Bolt options
boltOptions *bolt.Options
// Enable batch mode for read-write transaction, instead of update mode
batchMode bool
// The root bucket name
rootBucket []string
// Path of the database file
path string
// Bolt is still easily accessible
bolt *bolt.DB
}
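A minimal caller-side sketch of the new option signature (values are illustrative; `BoltOptions` and `Root` are the helpers shown above):
```go
// Each helper returns a func(*Options) error; Open applies them in order
// to an Options value before the bolt.DB is opened.
db, err := storm.Open("my.db",
	storm.BoltOptions(0600, &bolt.Options{Timeout: 10 * time.Second}),
	storm.Root("app"),
)
if err != nil {
	// handle the error
}
defer db.Close()
```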

View File

@ -37,7 +37,7 @@ func ExampleRe() {
}
type User struct {
ID int `storm:"id"`
ID int `storm:"id,increment"`
Group string `storm:"index"`
Email string `storm:"unique"`
Name string
@ -47,7 +47,7 @@ type User struct {
func prepareDB() (string, *storm.DB) {
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.AutoIncrement())
db, _ := storm.Open(filepath.Join(dir, "storm.db"))
for i, name := range []string{"John", "Norm", "Donald", "Eric", "Dilbert"} {
email := strings.ToLower(name + "@provider.com")

View File

@ -3,7 +3,7 @@ package storm
import (
"github.com/asdine/storm/internal"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// Select a list of records that match a list of matchers. Doesn't use indexes.

View File

@ -10,12 +10,12 @@ import (
)
type Score struct {
ID int
ID int `storm:"increment"`
Value int
}
func prepareScoreDB(t *testing.T) (*DB, func()) {
db, cleanup := createDB(t, AutoIncrement())
db, cleanup := createDB(t)
for i := 0; i < 20; i++ {
err := db.Save(&Score{
@ -492,7 +492,7 @@ func TestSelectCount(t *testing.T) {
}
func TestSelectRaw(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement(), Codec(json.Codec))
db, cleanup := createDB(t, Codec(json.Codec))
defer cleanup()
for i := 0; i < 20; i++ {
@ -520,7 +520,7 @@ func TestSelectRaw(t *testing.T) {
}
func TestSelectEach(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement(), Codec(json.Codec))
db, cleanup := createDB(t, Codec(json.Codec))
defer cleanup()
for i := 0; i < 20; i++ {

View File

@ -3,11 +3,11 @@ package storm
import (
"bytes"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// A BucketScanner scans a Node for a list of buckets
type BucketScanner interface {
// A bucketScanner scans a Node for a list of buckets
type bucketScanner interface {
// PrefixScan scans the root buckets for keys matching the given prefix.
PrefixScan(prefix string) []Node
// PrefixScan scans the buckets in this node for keys matching the given prefix.

View File

@ -6,7 +6,7 @@ import (
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
type item struct {

View File

@ -6,12 +6,12 @@ import (
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
// TypeStore stores user defined types in BoltDB
type TypeStore interface {
Finder
// typeStore stores user defined types in BoltDB
type typeStore interface {
finder
// Init creates the indexes and buckets for a given structure
Init(data interface{}) error
@ -32,10 +32,6 @@ type TypeStore interface {
// DeleteStruct deletes a structure from the associated bucket
DeleteStruct(data interface{}) error
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
Remove(data interface{}) error
}
// Init creates the indexes and buckets for a given structure
@ -152,7 +148,7 @@ func (n *node) Save(data interface{}) error {
}
if cfg.ID.IsZero {
if !cfg.ID.IsInteger || (!n.s.autoIncrement && !cfg.ID.Increment) {
if !cfg.ID.IsInteger || !cfg.ID.Increment {
return ErrZeroID
}
}
@ -181,7 +177,7 @@ func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, update boo
}
}
id, err := toBytes(cfg.ID.Value.Interface(), n.s.codec)
id, err := toBytes(cfg.ID.Value.Interface(), n.codec)
if err != nil {
return err
}
@ -215,7 +211,7 @@ func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, update boo
continue
}
value, err := toBytes(fieldCfg.Value.Interface(), n.s.codec)
value, err := toBytes(fieldCfg.Value.Interface(), n.codec)
if err != nil {
return err
}
@ -250,7 +246,7 @@ func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, update boo
}
}
raw, err := n.s.codec.Marshal(data)
raw, err := n.codec.Marshal(data)
if err != nil {
return err
}
@ -385,7 +381,7 @@ func (n *node) DeleteStruct(data interface{}) error {
return err
}
id, err := toBytes(cfg.ID.Value.Interface(), n.s.codec)
id, err := toBytes(cfg.ID.Value.Interface(), n.codec)
if err != nil {
return err
}
@ -427,9 +423,3 @@ func (n *node) deleteStruct(tx *bolt.Tx, cfg *structConfig, id []byte) error {
return bucket.Delete(id)
}
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
func (n *node) Remove(data interface{}) error {
return n.DeleteStruct(data)
}

View File

@ -9,7 +9,7 @@ import (
"github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
"github.com/stretchr/testify/require"
)
@ -154,7 +154,7 @@ func TestSave(t *testing.T) {
val := bucket.Get(i)
require.NotNil(t, val)
content, err := db.codec.Marshal(&v)
content, err := db.Codec().Marshal(&v)
require.NoError(t, err)
require.Equal(t, content, val)
return nil
@ -312,43 +312,8 @@ func TestSaveEmptyValues(t *testing.T) {
require.Error(t, err)
}
func TestSaveAutoIncrement(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement())
defer cleanup()
for i := 1; i < 10; i++ {
s := SimpleUser{Name: "John"}
err := db.Save(&s)
require.NoError(t, err)
require.Equal(t, i, s.ID)
}
u := UserWithUint64IDField{Name: "John"}
err := db.Save(&u)
require.NoError(t, err)
require.Equal(t, uint64(1), u.ID)
v := UserWithUint64IDField{}
err = db.One("ID", uint64(1), &v)
require.NoError(t, err)
require.Equal(t, u, v)
ui := UserWithIDField{Name: "John"}
err = db.Save(&ui)
require.NoError(t, err)
require.Equal(t, 1, ui.ID)
vi := UserWithIDField{}
err = db.One("ID", 1, &vi)
require.NoError(t, err)
require.Equal(t, ui, vi)
us := UserWithStringIDField{Name: "John"}
err = db.Save(&us)
require.Error(t, err)
require.Equal(t, ErrZeroID, err)
}
func TestSaveIncrement(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement())
db, cleanup := createDB(t)
defer cleanup()
type User struct {
@ -382,7 +347,7 @@ func TestSaveDifferentBucketRoot(t *testing.T) {
db, cleanup := createDB(t)
defer cleanup()
require.Len(t, db.rootBucket, 0)
require.Len(t, db.Node.(*node).rootBucket, 0)
dbSub := db.From("sub").(*node)
@ -413,10 +378,9 @@ func TestSaveDifferentBucketRoot(t *testing.T) {
func TestSaveEmbedded(t *testing.T) {
db, cleanup := createDB(t)
defer cleanup()
AutoIncrement()(db)
type Base struct {
ID int `storm:"id"`
ID int `storm:"id,increment"`
}
type User struct {

View File

@ -3,14 +3,11 @@ package storm
import (
"bytes"
"encoding/binary"
"os"
"time"
"github.com/asdine/storm/codec"
"github.com/asdine/storm/codec/json"
"github.com/asdine/storm/index"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
)
const (
@ -22,95 +19,65 @@ const (
var defaultCodec = json.Codec
// Open opens a database at the given path with optional Storm options.
func Open(path string, stormOptions ...func(*DB) error) (*DB, error) {
func Open(path string, stormOptions ...func(*Options) error) (*DB, error) {
var err error
s := &DB{
Path: path,
codec: defaultCodec,
}
var opts Options
for _, option := range stormOptions {
if err = option(s); err != nil {
if err = option(&opts); err != nil {
return nil, err
}
}
if s.boltMode == 0 {
s.boltMode = 0600
s := DB{
Bolt: opts.bolt,
}
if s.boltOptions == nil {
s.boltOptions = &bolt.Options{Timeout: 1 * time.Second}
n := node{
s: &s,
codec: opts.codec,
batchMode: opts.batchMode,
rootBucket: opts.rootBucket,
}
s.root = &node{s: s, rootBucket: s.rootBucket, codec: s.codec, batchMode: s.batchMode}
if n.codec == nil {
n.codec = defaultCodec
}
if opts.boltMode == 0 {
opts.boltMode = 0600
}
if opts.boltOptions == nil {
opts.boltOptions = &bolt.Options{Timeout: 1 * time.Second}
}
s.Node = &n
// skip if UseDB option is used
if s.Bolt == nil {
s.Bolt, err = bolt.Open(path, s.boltMode, s.boltOptions)
if err != nil {
return nil, err
}
err = s.checkVersion()
s.Bolt, err = bolt.Open(path, opts.boltMode, opts.boltOptions)
if err != nil {
return nil, err
}
}
return s, nil
err = s.checkVersion()
if err != nil {
return nil, err
}
return &s, nil
}
// DB is the wrapper around BoltDB. It contains an instance of BoltDB and uses it to perform all the
// needed operations
type DB struct {
// Path of the database file
Path string
// Handles encoding and decoding of objects
codec codec.MarshalUnmarshaler
// The root node that points to the root bucket.
Node
// Bolt is still easily accessible
Bolt *bolt.DB
// Bolt file mode
boltMode os.FileMode
// Bolt options
boltOptions *bolt.Options
// Enable auto increment on empty integer fields
autoIncrement bool
// The root node that points to the root bucket.
root *node
// The root bucket name
rootBucket []string
// Enable batch mode for read-write transaction, instead of update mode
batchMode bool
}
// From returns a new Storm node with a new bucket root.
// All DB operations on the new node will be executed relative to the given
// bucket.
func (s *DB) From(root ...string) Node {
newNode := *s.root
newNode.rootBucket = root
return &newNode
}
// WithTransaction returns a New Storm node that will use the given transaction.
func (s *DB) WithTransaction(tx *bolt.Tx) Node {
return s.root.WithTransaction(tx)
}
// Bucket returns the root bucket name as a slice.
// In the normal, simple case this will be empty.
func (s *DB) Bucket() []string {
return s.root.Bucket()
}
// Close the database
@ -118,167 +85,6 @@ func (s *DB) Close() error {
return s.Bolt.Close()
}
// Codec returns the EncodeDecoder used by this instance of Storm
func (s *DB) Codec() codec.MarshalUnmarshaler {
return s.codec
}
// WithCodec returns a New Storm Node that will use the given Codec.
func (s *DB) WithCodec(codec codec.MarshalUnmarshaler) Node {
n := s.From().(*node)
n.codec = codec
return n
}
// WithBatch returns a new Storm Node with the batch mode enabled.
func (s *DB) WithBatch(enabled bool) Node {
n := s.From().(*node)
n.batchMode = enabled
return n
}
// Get a value from a bucket
func (s *DB) Get(bucketName string, key interface{}, to interface{}) error {
return s.root.Get(bucketName, key, to)
}
// Set a key/value pair into a bucket
func (s *DB) Set(bucketName string, key interface{}, value interface{}) error {
return s.root.Set(bucketName, key, value)
}
// Delete deletes a key from a bucket
func (s *DB) Delete(bucketName string, key interface{}) error {
return s.root.Delete(bucketName, key)
}
// GetBytes gets a raw value from a bucket.
func (s *DB) GetBytes(bucketName string, key interface{}) ([]byte, error) {
return s.root.GetBytes(bucketName, key)
}
// SetBytes sets a raw value into a bucket.
func (s *DB) SetBytes(bucketName string, key interface{}, value []byte) error {
return s.root.SetBytes(bucketName, key, value)
}
// Save a structure
func (s *DB) Save(data interface{}) error {
return s.root.Save(data)
}
// PrefixScan scans the root buckets for keys matching the given prefix.
func (s *DB) PrefixScan(prefix string) []Node {
return s.root.PrefixScan(prefix)
}
// RangeScan scans the root buckets over a range such as a sortable time range.
func (s *DB) RangeScan(min, max string) []Node {
return s.root.RangeScan(min, max)
}
// Select a list of records that match a list of matchers. Doesn't use indexes.
func (s *DB) Select(matchers ...q.Matcher) Query {
return s.root.Select(matchers...)
}
// Range returns one or more records by the specified index within the specified range
func (s *DB) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
return s.root.Range(fieldName, min, max, to, options...)
}
// Prefix returns one or more records whose given field starts with the specified prefix.
func (s *DB) Prefix(fieldName string, prefix string, to interface{}, options ...func(*index.Options)) error {
return s.root.Prefix(fieldName, prefix, to, options...)
}
// AllByIndex gets all the records of a bucket that are indexed in the specified index
func (s *DB) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
return s.root.AllByIndex(fieldName, to, options...)
}
// All get all the records of a bucket
func (s *DB) All(to interface{}, options ...func(*index.Options)) error {
return s.root.All(to, options...)
}
// Count counts all the records of a bucket
func (s *DB) Count(data interface{}) (int, error) {
return s.root.Count(data)
}
// DeleteStruct deletes a structure from the associated bucket
func (s *DB) DeleteStruct(data interface{}) error {
return s.root.DeleteStruct(data)
}
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
func (s *DB) Remove(data interface{}) error {
return s.root.DeleteStruct(data)
}
// Drop a bucket
func (s *DB) Drop(data interface{}) error {
return s.root.Drop(data)
}
// Find returns one or more records by the specified index
func (s *DB) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
return s.root.Find(fieldName, value, to, options...)
}
// Init creates the indexes and buckets for a given structure
func (s *DB) Init(data interface{}) error {
return s.root.Init(data)
}
// ReIndex rebuilds all the indexes of a bucket
func (s *DB) ReIndex(data interface{}) error {
return s.root.ReIndex(data)
}
// One returns one record by the specified index
func (s *DB) One(fieldName string, value interface{}, to interface{}) error {
return s.root.One(fieldName, value, to)
}
// Begin starts a new transaction.
func (s *DB) Begin(writable bool) (Node, error) {
return s.root.Begin(writable)
}
// Rollback closes the transaction and ignores all previous updates.
func (s *DB) Rollback() error {
return s.root.Rollback()
}
// Commit writes all changes to disk.
func (s *DB) Commit() error {
return s.root.Rollback()
}
// Update a structure
func (s *DB) Update(data interface{}) error {
return s.root.Update(data)
}
// UpdateField updates a single field
func (s *DB) UpdateField(data interface{}, fieldName string, value interface{}) error {
return s.root.UpdateField(data, fieldName, value)
}
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.
func (s *DB) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
return s.root.CreateBucketIfNotExists(tx, bucket)
}
// GetBucket returns the given bucket below the current node.
func (s *DB) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
return s.root.GetBucket(tx, children...)
}
func (s *DB) checkVersion() error {
var v string
err := s.Get(dbinfo, "version", &v)
@ -286,8 +92,9 @@ func (s *DB) checkVersion() error {
return err
}
// for now, we only set the current version if it doesn't exist or if v0.5.0
if v == "" || v == "0.5.0" || v == "0.6.0" {
// for now, we only set the current version if it doesn't exist.
// v1 and v2 database files are compatible.
if v == "" {
return s.Set(dbinfo, "version", Version)
}

View File

@ -12,7 +12,7 @@ import (
"time"
"github.com/asdine/storm/codec/json"
"github.com/boltdb/bolt"
"github.com/coreos/bbolt"
"github.com/stretchr/testify/require"
)
@ -33,7 +33,6 @@ func TestNewStorm(t *testing.T) {
require.Implements(t, (*Node)(nil), db)
require.NoError(t, err)
require.Equal(t, file, db.Path)
require.NotNil(t, db.Bolt)
require.Equal(t, defaultCodec, db.Codec())
@ -48,13 +47,9 @@ func TestNewStormWithStormOptions(t *testing.T) {
defer os.RemoveAll(dir)
dc := new(dummyCodec)
db1, _ := Open(filepath.Join(dir, "storm1.db"), BoltOptions(0660, &bolt.Options{Timeout: 10 * time.Second}), Codec(dc), AutoIncrement(), Root("a", "b"))
db1, _ := Open(filepath.Join(dir, "storm1.db"), BoltOptions(0660, &bolt.Options{Timeout: 10 * time.Second}), Codec(dc), Root("a", "b"))
require.Equal(t, dc, db1.Codec())
require.True(t, db1.autoIncrement)
require.Equal(t, os.FileMode(0660), db1.boltMode)
require.Equal(t, 10*time.Second, db1.boltOptions.Timeout)
require.Equal(t, []string{"a", "b"}, db1.rootBucket)
require.Equal(t, []string{"a", "b"}, db1.root.rootBucket)
require.Equal(t, []string{"a", "b"}, db1.Node.(*node).rootBucket)
err := db1.Save(&SimpleUser{ID: 1})
require.NoError(t, err)
@ -70,7 +65,7 @@ func TestNewStormWithBatch(t *testing.T) {
db1, _ := Open(filepath.Join(dir, "storm1.db"), Batch())
defer db1.Close()
require.True(t, db1.root.batchMode)
require.True(t, db1.Node.(*node).batchMode)
n := db1.From().(*node)
require.True(t, n.batchMode)
n = db1.WithBatch(true).(*node)
@ -163,7 +158,7 @@ func TestToBytes(t *testing.T) {
}
}
func createDB(t errorHandler, opts ...func(*DB) error) (*DB, func()) {
func createDB(t errorHandler, opts ...func(*Options) error) (*DB, func()) {
dir, err := ioutil.TempDir(os.TempDir(), "storm")
if err != nil {
t.Error(err)

View File

@ -1,9 +1,9 @@
package storm
import "github.com/boltdb/bolt"
import "github.com/coreos/bbolt"
// Tx is a transaction
type Tx interface {
// tx is a transaction
type tx interface {
// Commit writes all changes to disk.
Commit() error

View File

@ -1,4 +1,4 @@
package storm
// Version of Storm
const Version = "1.0.0"
const Version = "2.0.0"

View File

@ -77,6 +77,8 @@ func (hm HandshakeMessage) ToBody() (HandshakeMessageBody, error) {
body = new(ClientHelloBody)
case HandshakeTypeServerHello:
body = new(ServerHelloBody)
case HandshakeTypeHelloRetryRequest:
body = new(HelloRetryRequestBody)
case HandshakeTypeEncryptedExtensions:
body = new(EncryptedExtensionsBody)
case HandshakeTypeCertificate:

View File

@ -120,6 +120,29 @@ func (ch ClientHelloBody) Truncated() ([]byte, error) {
return chData[:chLen-binderLen], nil
}
// struct {
// ProtocolVersion server_version;
// CipherSuite cipher_suite;
// Extension extensions<2..2^16-1>;
// } HelloRetryRequest;
type HelloRetryRequestBody struct {
Version uint16
CipherSuite CipherSuite
Extensions ExtensionList `tls:"head=2,min=2"`
}
func (hrr HelloRetryRequestBody) Type() HandshakeType {
return HandshakeTypeHelloRetryRequest
}
func (hrr HelloRetryRequestBody) Marshal() ([]byte, error) {
return syntax.Marshal(hrr)
}
func (hrr *HelloRetryRequestBody) Unmarshal(data []byte) (int, error) {
return syntax.Unmarshal(data, hrr)
}
// struct {
// ProtocolVersion legacy_version = 0x0303; /* TLS v1.2 */
// Random random;

View File

@ -79,6 +79,16 @@ var (
},
}
// HelloRetryRequest test cases
hrrValidIn = HelloRetryRequestBody{
Version: supportedVersion,
CipherSuite: 0x0001,
Extensions: extListValidIn,
}
hrrEmptyIn = HelloRetryRequestBody{}
hrrValidHex = supportedVersionHex + "0001" + extListValidHex
hrrEmptyHex = supportedVersionHex + "0001" + "0000"
// ServerHello test cases
shValidIn = ServerHelloBody{
Version: tls12Version,
@ -342,6 +352,34 @@ func TestClientHelloTruncate(t *testing.T) {
assertError(t, err, "Truncated a ClientHello with a mal-formed PSK")
}
func TestHelloRetryRequestMarshalUnmarshal(t *testing.T) {
hrrValid := unhex(hrrValidHex)
hrrEmpty := unhex(hrrEmptyHex)
// Test correctness of handshake type
assertEquals(t, (HelloRetryRequestBody{}).Type(), HandshakeTypeHelloRetryRequest)
// Test successful marshal
out, err := hrrValidIn.Marshal()
assertNotError(t, err, "Failed to marshal a valid HelloRetryRequest")
assertByteEquals(t, out, hrrValid)
// Test marshal failure with no extensions present
out, err = hrrEmptyIn.Marshal()
assertError(t, err, "Marshaled HelloRetryRequest with no extensions")
// Test successful unmarshal
var hrr HelloRetryRequestBody
read, err := hrr.Unmarshal(hrrValid)
assertNotError(t, err, "Failed to unmarshal a valid HelloRetryRequest")
assertEquals(t, read, len(hrrValid))
assertDeepEquals(t, hrr, hrrValidIn)
// Test unmarshal failure with no extensions present
read, err = hrr.Unmarshal(hrrEmpty)
assertError(t, err, "Unmarshaled a HelloRetryRequest with no extensions")
}
func TestServerHelloMarshalUnmarshal(t *testing.T) {
shValid := unhex(shValidHex)
shEmpty := unhex(shEmptyHex)

View File

@ -1,28 +0,0 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}

View File

@ -15,11 +15,11 @@ and setting values. That's it.
## Project Status
Bolt is stable, the API is fixed, and the file format is fixed. Full unit
test coverage and randomized black box testing are used to ensure database
consistency and thread safety. Bolt is currently used in high-load production
environments serving databases as large as 1TB. Many companies such as
Shopify and Heroku use Bolt-backed services every day.
Bolt is stable and the API is fixed. Full unit test coverage and randomized
black box testing are used to ensure database consistency and thread safety.
Bolt is currently in high-load production environments serving databases as
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
services every day.
## Table of Contents
@ -209,7 +209,7 @@ and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions.
However, sometimes you may want to manually start and end your transactions.
You can use the `DB.Begin()` function directly but **please** be sure to close
You can use the `Tx.Begin()` function directly but **please** be sure to close
the transaction.
```go
@ -395,7 +395,7 @@ db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("MyBucket")).Cursor()
prefix := []byte("1234")
for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
fmt.Printf("key=%s, value=%s\n", k, v)
}
@ -448,10 +448,6 @@ db.View(func(tx *bolt.Tx) error {
})
```
Please note that keys and values in `ForEach()` are only valid while
the transaction is open. If you need to use a key or value outside of
the transaction, you must use `copy()` to copy it to another byte
slice.
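A small sketch of that copy pattern (the bucket and key names are illustrative):
```go
var valCopy []byte
if err := db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	// v points into Bolt's memory map and is only valid while this
	// transaction is open, so copy it out before returning.
	valCopy = make([]byte, len(v))
	copy(valCopy, v)
	return nil
}); err != nil {
	// handle the error
}
```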
### Nested buckets
@ -464,55 +460,6 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error
```
Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings.
```go
// createUser creates a new user in the given account.
func createUser(accountID int, u *User) error {
// Start the transaction.
tx, err := db.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
// Retrieve the root bucket for the account.
// Assume this has already been created when the account was set up.
root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10)))
// Setup the users bucket.
bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
if err != nil {
return err
}
// Generate an ID for the new user.
userID, err := bkt.NextSequence()
if err != nil {
return err
}
u.ID = userID
// Marshal and save the encoded user.
if buf, err := json.Marshal(u); err != nil {
return err
} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
return err
}
// Commit the transaction.
if err := tx.Commit(); err != nil {
return err
}
return nil
}
```
### Database backups
@ -768,9 +715,6 @@ Here are a few things to note when evaluating and using Bolt:
can be reused by a new page or can be unmapped from virtual memory and you'll
see an `unexpected fault address` panic when accessing it.
* Bolt uses an exclusive write lock on the database file so it cannot be
shared by multiple processes.
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
buckets that have random inserts will cause your database to have very poor
page utilization.
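A minimal sketch of the `Bucket.FillPercent` point (bucket name and keys are illustrative; a high fill percent only pays off for append-only, in-order keys):
```go
if err := db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("events"))
	if err != nil {
		return err
	}
	// Keys here are inserted in sorted order, so pages can be packed tighter.
	// With random inserts a high fill percent causes frequent page splits
	// and poor page utilization.
	b.FillPercent = 0.9
	return b.Put([]byte("2018-01-03T00:00:01Z"), []byte("payload"))
}); err != nil {
	// handle the error
}
```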
@ -904,13 +848,5 @@ Below is a list of public, open source projects that use Bolt:
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it's using high performent HTTPRouter.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
If you are using Bolt in a project please send a pull request to add it to the list.

View File

@ -5,6 +5,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -5,6 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

vendor/github.com/coreos/bbolt/bolt_arm.go (generated, vendored, new file): 7 changed lines
View File

@ -0,0 +1,7 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

View File

@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -89,7 +89,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close()
os.Remove(db.path + lockExt)
os.Remove(db.path+lockExt)
return err
}

View File

@ -130,17 +130,9 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry.
if b.tx.writable && !unaligned {
if b.tx.writable {
child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else {
@ -175,8 +167,9 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists
} else {
return nil, ErrIncompatibleValue
}
return nil, ErrIncompatibleValue
}
// Create empty, inline bucket.
@ -336,28 +329,6 @@ func (b *Bucket) Delete(key []byte) error {
return nil
}
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence = v
return nil
}
// NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil {

View File

@ -782,48 +782,6 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
}
}
// Ensure bucket can set and update its sequence number.
func TestBucket_Sequence(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
bkt, err := tx.CreateBucket([]byte("0"))
if err != nil {
t.Fatal(err)
}
// Retrieve sequence.
if v := bkt.Sequence(); v != 0 {
t.Fatalf("unexpected sequence: %d", v)
}
// Update sequence.
if err := bkt.SetSequence(1000); err != nil {
t.Fatal(err)
}
// Read sequence again.
if v := bkt.Sequence(); v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
// Verify sequence in separate transaction.
if err := db.View(func(tx *bolt.Tx) error {
if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can return an autoincrementing sequence.
func TestBucket_NextSequence(t *testing.T) {
db := MustOpenDB()

View File

@ -552,10 +552,7 @@ func (db *DB) removeTx(tx *Tx) {
// Remove the transaction.
for i, t := range db.txs {
if t == tx {
last := len(db.txs) - 1
db.txs[i] = db.txs[last]
db.txs[last] = nil
db.txs = db.txs[:last]
db.txs = append(db.txs[:i], db.txs[i+1:]...)
break
}
}
@ -955,7 +952,7 @@ func (s *Stats) Sub(other *Stats) Stats {
diff.PendingPageN = s.PendingPageN
diff.FreeAlloc = s.FreeAlloc
diff.FreelistInuse = s.FreelistInuse
diff.TxN = s.TxN - other.TxN
diff.TxN = other.TxN - s.TxN
diff.TxStats = s.TxStats.Sub(&other.TxStats)
return diff
}

View File

@ -12,6 +12,7 @@ import (
"os"
"path/filepath"
"regexp"
"runtime"
"sort"
"strings"
"sync"
@ -179,6 +180,69 @@ func TestOpen_ErrChecksum(t *testing.T) {
}
}
// Ensure that opening an already open database file will timeout.
func TestOpen_Timeout(t *testing.T) {
if runtime.GOOS == "solaris" {
t.Skip("solaris fcntl locks don't support intra-process locking")
}
path := tempfile()
// Open a data file.
db0, err := bolt.Open(path, 0666, nil)
if err != nil {
t.Fatal(err)
} else if db0 == nil {
t.Fatal("expected database")
}
// Attempt to open the database again.
start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond})
if err != bolt.ErrTimeout {
t.Fatalf("unexpected timeout: %s", err)
} else if db1 != nil {
t.Fatal("unexpected database")
} else if time.Since(start) <= 100*time.Millisecond {
t.Fatal("expected to wait at least timeout duration")
}
if err := db0.Close(); err != nil {
t.Fatal(err)
}
}
// Ensure that opening an already open database file will wait until it's closed.
func TestOpen_Wait(t *testing.T) {
if runtime.GOOS == "solaris" {
t.Skip("solaris fcntl locks don't support intra-process locking")
}
path := tempfile()
// Open a data file.
db0, err := bolt.Open(path, 0666, nil)
if err != nil {
t.Fatal(err)
}
// Close it in just a bit.
time.AfterFunc(100*time.Millisecond, func() { _ = db0.Close() })
// Attempt to open the database again.
start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond})
if err != nil {
t.Fatal(err)
} else if time.Since(start) <= 100*time.Millisecond {
t.Fatal("expected to wait at least timeout duration")
}
if err := db1.Close(); err != nil {
t.Fatal(err)
}
}
// Ensure that opening a database does not increase its size.
// https://github.com/boltdb/bolt/issues/291
func TestOpen_Size(t *testing.T) {
@ -362,6 +426,103 @@ func TestOpen_FileTooSmall(t *testing.T) {
}
}
// Ensure that a database can be opened in read-only mode by multiple processes
// and that a database can not be opened in read-write mode and in read-only
// mode at the same time.
func TestOpen_ReadOnly(t *testing.T) {
if runtime.GOOS == "solaris" {
t.Skip("solaris fcntl locks don't support intra-process locking")
}
bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`)
path := tempfile()
// Open in read-write mode.
db, err := bolt.Open(path, 0666, nil)
if err != nil {
t.Fatal(err)
} else if db.IsReadOnly() {
t.Fatal("db should not be in read only mode")
}
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket(bucket)
if err != nil {
return err
}
if err := b.Put(key, value); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Close(); err != nil {
t.Fatal(err)
}
// Open in read-only mode.
db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
if err != nil {
t.Fatal(err)
}
// Opening in read-write mode should return an error.
if _, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100}); err == nil {
t.Fatal("expected error")
}
// And again (in read-only mode).
db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
if err != nil {
t.Fatal(err)
}
// Verify both read-only databases are accessible.
for _, db := range []*bolt.DB{db0, db1} {
// Verify it is indeed in read-only mode.
if !db.IsReadOnly() {
t.Fatal("expected read only mode")
}
// Read-only databases should not allow updates.
if err := db.Update(func(*bolt.Tx) error {
panic(`should never get here`)
}); err != bolt.ErrDatabaseReadOnly {
t.Fatalf("unexpected error: %s", err)
}
// Read-only databases should not allow beginning writable txns.
if _, err := db.Begin(true); err != bolt.ErrDatabaseReadOnly {
t.Fatalf("unexpected error: %s", err)
}
// Verify the data.
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
if b == nil {
return fmt.Errorf("expected bucket `%s`", string(bucket))
}
got := string(b.Get(key))
expected := string(value)
if got != expected {
return fmt.Errorf("expected `%s`, got `%s`", expected, got)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
if err := db0.Close(); err != nil {
t.Fatal(err)
}
if err := db1.Close(); err != nil {
t.Fatal(err)
}
}
// TestDB_Open_InitialMmapSize tests whether having InitialMmapSize large enough
// to hold data from a concurrent write transaction resolves the issue where a
// read transaction blocks the write transaction and causes a deadlock.
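For reference, InitialMmapSize is an option passed to bolt.Open; a sketch, assuming the same bolt import as the sequence example above, with a made-up path and size:
```
// Illustrative sketch only: pre-size the mmap as the test above describes.
func openPresized(path string) (*bolt.DB, error) {
	// Reserve 10 MiB of mmap up front so a long read transaction doesn't
	// force a remap when a concurrent writer grows the file.
	return bolt.Open(path, 0600, &bolt.Options{InitialMmapSize: 10 << 20})
}
```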

@ -24,12 +24,7 @@ func newFreelist() *freelist {
// size returns the size of the page after serialization.
func (f *freelist) size() int {
n := f.count()
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
}
// count returns count of pages on the freelist
@ -51,15 +46,16 @@ func (f *freelist) pending_count() int {
return count
}
// copyall copies into dst a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst.
func (f *freelist) copyall(dst []pgid) {
m := make(pgids, 0, f.pending_count())
// all returns a list of all free ids and all pending ids in one sorted list.
func (f *freelist) all() []pgid {
m := make(pgids, 0)
for _, list := range f.pending {
m = append(m, list...)
}
sort.Sort(m)
mergepgids(dst, f.ids, m)
return pgids(f.ids).merge(m)
}
// allocate returns the starting page id of a contiguous list of pages of a given size.
@ -190,22 +186,22 @@ func (f *freelist) read(p *page) {
// become free.
func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction.
ids := f.all()
// Update the header flag.
p.flags |= freelistPageFlag
// The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element.
lenids := f.count()
if lenids == 0 {
p.count = uint16(lenids)
} else if lenids < 0xFFFF {
p.count = uint16(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
if len(ids) == 0 {
p.count = uint16(len(ids))
} else if len(ids) < 0xFFFF {
p.count = uint16(len(ids))
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
} else {
p.count = 0xFFFF
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:])
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
}
return nil
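The write path above depends on a small header convention: page.count is a uint16, so a stored count of 0xFFFF means the real length lives in the first element of the id array instead. A standalone sketch of that convention, with plain uint64s standing in for pgid:
```
// encodeIDs applies the 0xFFFF overflow convention described above.
func encodeIDs(ids []uint64) (count uint16, payload []uint64) {
	if len(ids) < 0xFFFF {
		return uint16(len(ids)), ids
	}
	// Real length goes in the first slot; the header is pinned at 0xFFFF.
	return 0xFFFF, append([]uint64{uint64(len(ids))}, ids...)
}

// decodeIDs reverses encodeIDs.
func decodeIDs(count uint16, payload []uint64) []uint64 {
	if count < 0xFFFF {
		return payload[:count]
	}
	n := payload[0]
	return payload[1 : 1+n]
}
```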
@ -240,7 +236,7 @@ func (f *freelist) reload(p *page) {
// reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() {
f.cache = make(map[pgid]bool, len(f.ids))
f.cache = make(map[pgid]bool)
for _, id := range f.ids {
f.cache[id] = true
}

@ -145,33 +145,12 @@ func (a pgids) merge(b pgids) pgids {
// Return the opposite slice if one is nil.
if len(a) == 0 {
return b
}
if len(b) == 0 {
} else if len(b) == 0 {
return a
}
merged := make(pgids, len(a)+len(b))
mergepgids(merged, a, b)
return merged
}
// mergepgids copies the sorted union of a and b into dst.
// If dst is too small, it panics.
func mergepgids(dst, a, b pgids) {
if len(dst) < len(a)+len(b) {
panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
}
// Copy in the opposite slice if one is nil.
if len(a) == 0 {
copy(dst, b)
return
}
if len(b) == 0 {
copy(dst, a)
return
}
// Merged will hold all elements from both lists.
merged := dst[:0]
// Create a list to hold all elements from both lists.
merged := make(pgids, 0, len(a)+len(b))
// Assign lead to the slice with a lower starting value, follow to the higher value.
lead, follow := a, b
@ -193,5 +172,7 @@ func mergepgids(dst, a, b pgids) {
}
// Append what's left in follow.
_ = append(merged, follow...)
merged = append(merged, follow...)
return merged
}
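Both merge and mergepgids perform a sorted merge of two already-sorted id lists; the lead/follow trick just appends whole runs at a time. A simpler element-by-element sketch of the same idea:
```
// mergeSorted merges two sorted slices into one sorted slice, without the
// lead/follow run-skipping optimization used above.
func mergeSorted(a, b []uint64) []uint64 {
	merged := make([]uint64, 0, len(a)+len(b))
	for len(a) > 0 && len(b) > 0 {
		if a[0] <= b[0] {
			merged = append(merged, a[0])
			a = a[1:]
		} else {
			merged = append(merged, b[0])
			b = b[1:]
		}
	}
	merged = append(merged, a...)
	return append(merged, b...)
}
```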

@ -50,17 +50,9 @@ func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key)
func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
n := rand.Intn(qmaxitems-1) + 1
items := make(testdata, n)
used := make(map[string]bool)
for i := 0; i < n; i++ {
item := &items[i]
// Ensure that keys are unique by looping until we find one that we have not already used.
for {
item.Key = randByteSlice(rand, 1, qmaxksize)
if !used[string(item.Key)] {
used[string(item.Key)] = true
break
}
}
item.Key = randByteSlice(rand, 1, qmaxksize)
item.Value = randByteSlice(rand, 0, qmaxvsize)
}
return reflect.ValueOf(items)

@ -381,9 +381,7 @@ func (tx *Tx) Check() <-chan error {
func (tx *Tx) check(ch chan error) {
// Check if any pages are double freed.
freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count())
tx.db.freelist.copyall(all)
for _, id := range all {
for _, id := range tx.db.freelist.all() {
if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id)
}

@ -57,7 +57,7 @@ func (h *extensionHandlerClient) Send(hType mint.HandshakeType, el *mint.Extensi
func (h *extensionHandlerClient) Receive(hType mint.HandshakeType, el *mint.ExtensionList) error {
ext := &tlsExtensionBody{}
found := el.Find(ext)
found, _ := el.Find(ext)
if hType != mint.HandshakeTypeEncryptedExtensions && hType != mint.HandshakeTypeNewSessionTicket {
if found {

@ -66,7 +66,7 @@ func (h *extensionHandlerServer) Send(hType mint.HandshakeType, el *mint.Extensi
func (h *extensionHandlerServer) Receive(hType mint.HandshakeType, el *mint.ExtensionList) error {
ext := &tlsExtensionBody{}
found := el.Find(ext)
found, _ := el.Find(ext)
if hType != mint.HandshakeTypeClientHello {
if found {
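Both extension-handler hunks are the same manual fix: mint's ExtensionList.Find now returns a second value, discarded here with the blank identifier. Assuming that value is an error, a stricter call site would look roughly like this:
```
// Sketch only, not the vendored code: assumes the second value returned by
// el.Find is an error rather than discarding it.
ext := &tlsExtensionBody{}
found, err := el.Find(ext)
if err != nil {
	return err
}
if found {
	// same handling as before the signature change
}
```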

@ -18,4 +18,11 @@ install: true
# don't call go test -v because we want to be able to only show t.Log output when
# a test fails
script: go test -tags CI -race $(go list ./... | grep -v /vendor/)
script: go test -race $(go list ./... | grep -v /vendor/)
# run a test for every major OS
env:
- GOOS=linux
- GOOS=windows
- GOOS=darwin

@ -22,16 +22,15 @@ or other trivial change.
## Dependency Management
Currently mage has no dependencies(!) outside the standard library. Let's keep
it that way. Since it's likely that mage will be vendored into a project,
adding dependencies to mage adds dependencies to every project that uses mage.
Currently mage has no dependencies(!). Let's try to keep it that way. Since
it's likely that mage will be vendored into a project, adding dependencies to
mage adds dependencies to every project that uses mage.
## Versions
Please avoid using features of go and the stdlib that prevent mage from being
buildable with older versions of Go. The CI tests currently check that mage is
buildable with go 1.7 and later. You may build with whatever version you like,
but CI has the final say.
Please try to avoid using features of go and the stdlib that prevent mage from
being buildable with old versions of Go. Definitely avoid anything that
requires go 1.9.
## Testing

@ -1,35 +1,9 @@
<p align="center"><img src="https://user-images.githubusercontent.com/3185864/32058716-5ee9b512-ba38-11e7-978a-287eb2a62743.png"/></p>
<h1 align=center>mage</h1>
<p align="center"><img src="https://user-images.githubusercontent.com/3185864/31061203-6f6743dc-a6ec-11e7-9469-b8d667d9bc3f.png"/></p>
## About [![Build Status](https://travis-ci.org/magefile/mage.svg?branch=master)](https://travis-ci.org/magefile/mage)
<p align="center">Mage is a make/rake-like build tool using Go.</p>
Mage is a make/rake-like build tool using Go. You write plain-old go functions,
and Mage automatically uses them as Makefile-like runnable targets.
## Installation
Mage has no dependencies outside the Go standard library, and builds with Go 1.7
and above (possibly even lower versions, but they're not regularly tested).
Install mage by running
```
go get -u -d github.com/magefile/mage
cd $GOPATH/src/github.com/magefile/mage
go run bootstrap.go
```
This will download the code into your GOPATH, and then run the bootstrap script
to build mage with version information embedded in it. A normal `go get`
(without -d) will build the binary correctly, but no version info will be
embedded. If you've done this, no worries, just go to
$GOPATH/src/github.com/magefile/mage and run `mage install` or `go run
bootstrap.go` and a new binary will be created with the correct version
information.
The mage binary will be created in your $GOPATH/bin directory.
You may also install a binary release from our
[releases](https://github.com/magefile/mage/releases) page.
[![Build Status](https://travis-ci.org/magefile/mage.svg?branch=master)](https://travis-ci.org/magefile/mage)
## Demo

@ -1,19 +0,0 @@
//+build ignore
package main
import (
"os"
"github.com/magefile/mage/mage"
)
// This is a bootstrap builder, to build mage when you don't already *have* mage.
// Run it like
// go run bootstrap.go
// and it will install mage with all the right flags created for you.
func main() {
os.Args = []string{os.Args[0], "-v", "install"}
os.Exit(mage.Main())
}

@ -1,31 +0,0 @@
//+build CI
package main
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
)
func TestBootstrap(t *testing.T) {
dir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
s, err := run("go", "run", "bootstrap.go")
if err != nil {
t.Fatal(s)
}
name := "mage"
if runtime.GOOS == "windows" {
name += ".exe"
}
if _, err := os.Stat(filepath.Join(os.Getenv("GOPATH"), "bin", name)); err != nil {
t.Fatal(err)
}
}

@ -3,46 +3,23 @@
package main
import (
"bytes"
"errors"
"fmt"
"os"
"path/filepath"
"runtime"
"strings"
"time"
"github.com/magefile/mage/sh"
)
// Runs "go install" for mage. This generates the version info the binary.
func Install() error {
func Build() error {
ldf, err := flags()
if err != nil {
return err
}
name := "mage"
if runtime.GOOS == "windows" {
name += ".exe"
}
gopath, err := sh.Output("go", "env", "GOPATH")
if err != nil {
return fmt.Errorf("can't determine GOPATH: %v", err)
}
paths := strings.Split(gopath, string([]rune{os.PathListSeparator}))
bin := filepath.Join(paths[0], "bin")
// specifically don't MkdirAll; if you have an invalid gopath in the first
// place, that's not on us to fix.
if err := os.Mkdir(bin, 0700); err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create %q: %v", bin, err)
}
path := filepath.Join(bin, name)
// we use go build here because if someone built with go get, then `go
// install` turns into a no-op, and `go install -a` fails on people's
// machines that have go installed in a non-writeable directory (such as
// normal OS installs in /usr/bin)
return sh.RunV("go", "build", "-o", path, "-ldflags="+ldf, "github.com/magefile/mage")
return sh.Run("go", "install", "-ldflags="+ldf, "github.com/magefile/mage")
}
// Generates a new release. Expects the TAG environment variable to be set,
@ -83,8 +60,9 @@ func flags() (string, error) {
// tag returns the git tag for the current branch or "" if none.
func tag() string {
s, _ := sh.Output("git", "describe", "--tags")
return s
buf := &bytes.Buffer{}
_, _ = sh.Exec(nil, buf, nil, "git", "describe", "--tags")
return buf.String()
}
// hash returns the git hash for the current repo or "" if none.
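The Install/Build target and the tag/hash helpers exist to pass -ldflags to the compiler so version info is baked into the binary. A generic, self-contained sketch of that mechanism; the variable names and values are made up:
```
// Build with, e.g.:
//   go build -ldflags '-X main.gitTag=v2.0.1 -X main.commitHash=abc1234'
package main

import "fmt"

// Overridden at link time via -X; the defaults apply to a plain `go build`.
var (
	gitTag     = "dev"
	commitHash = "unknown"
)

func main() {
	fmt.Printf("built from %s (%s)\n", gitTag, commitHash)
}
```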