fix quic manually

This commit is contained in:
Cadey Ratio 2018-01-03 11:29:07 -08:00
parent 792c0a9e81
commit 88a375f174
83 changed files with 608 additions and 841 deletions

24
Gopkg.lock generated
View File

@ -76,8 +76,8 @@
[[projects]] [[projects]]
name = "github.com/asdine/storm" name = "github.com/asdine/storm"
packages = [".","codec","codec/json","index","internal","q"] packages = [".","codec","codec/json","index","internal","q"]
revision = "255212403bcca439778718edf5e2d3d50744eca3" revision = "dbd37722730b6cb703b5bd825c3f142d87358525"
version = "v1.1.0" version = "v2.0.0"
[[projects]] [[projects]]
name = "github.com/aws/aws-sdk-go" name = "github.com/aws/aws-sdk-go"
@ -101,7 +101,7 @@
branch = "master" branch = "master"
name = "github.com/bifurcation/mint" name = "github.com/bifurcation/mint"
packages = [".","syntax"] packages = [".","syntax"]
revision = "d5dd291d400abddb674b2b2acfee6881c1c8f8e5" revision = "f699e8d03646cb8e6e15410ced7bff37fcf8dddd"
[[projects]] [[projects]]
name = "github.com/blang/semver" name = "github.com/blang/semver"
@ -109,12 +109,6 @@
revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f" revision = "2ee87856327ba09384cabd113bc6b5d174e9ec0f"
version = "v3.5.1" version = "v3.5.1"
[[projects]]
name = "github.com/boltdb/bolt"
packages = ["."]
revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8"
version = "v1.3.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/brandur/simplebox" name = "github.com/brandur/simplebox"
@ -127,6 +121,12 @@
revision = "7cd7992b3bc86f920394f8de92c13900da1a46b7" revision = "7cd7992b3bc86f920394f8de92c13900da1a46b7"
version = "v3.2.0" version = "v3.2.0"
[[projects]]
name = "github.com/coreos/bbolt"
packages = ["."]
revision = "583e8937c61f1af6513608ccc75c97b6abdf4ff9"
version = "v1.3.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/dgryski/go-failure" name = "github.com/dgryski/go-failure"
@ -325,10 +325,10 @@
revision = "d2f86524cced5186554df90d92529757d22c1cb6" revision = "d2f86524cced5186554df90d92529757d22c1cb6"
[[projects]] [[projects]]
branch = "master"
name = "github.com/magefile/mage" name = "github.com/magefile/mage"
packages = ["mg","types"] packages = ["mg","types"]
revision = "63768081a3236a7c6c53ef72e402ae1fe1664b61" revision = "ab3ca2f6f85577d7ec82e0a6df721147a2e737f9"
version = "v2.0.1"
[[projects]] [[projects]]
name = "github.com/mattn/go-isatty" name = "github.com/mattn/go-isatty"
@ -525,6 +525,6 @@
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "207902d7a1c84bb5bca1b004cbb19f67e06f2231f9ad48b2129698bf47f115a7" inputs-digest = "dd3b3341036bb95c8a409729fa12b897e6515c32cfaae8218cf27d60ad1a3b07"
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1

View File

@ -39,7 +39,7 @@
[[constraint]] [[constraint]]
name = "github.com/asdine/storm" name = "github.com/asdine/storm"
version = "1.1.0" version = "2.0.0"
[[constraint]] [[constraint]]
branch = "master" branch = "master"
@ -47,7 +47,7 @@
[[constraint]] [[constraint]]
name = "github.com/caarlos0/env" name = "github.com/caarlos0/env"
version = "3.0.0" version = "3.2.0"
[[constraint]] [[constraint]]
branch = "master" branch = "master"
@ -69,6 +69,10 @@
name = "github.com/google/gops" name = "github.com/google/gops"
version = "0.3.2" version = "0.3.2"
[[constraint]]
name = "github.com/hashicorp/terraform"
version = "0.11.1"
[[constraint]] [[constraint]]
name = "github.com/joho/godotenv" name = "github.com/joho/godotenv"
version = "1.2.0" version = "1.2.0"
@ -82,8 +86,12 @@
name = "github.com/kr/pretty" name = "github.com/kr/pretty"
[[constraint]] [[constraint]]
branch = "master" name = "github.com/lucas-clemente/quic-go"
version = "0.6.0"
[[constraint]]
name = "github.com/magefile/mage" name = "github.com/magefile/mage"
version = "2.0.1"
[[constraint]] [[constraint]]
branch = "master" branch = "master"
@ -103,7 +111,7 @@
[[constraint]] [[constraint]]
name = "github.com/xtaci/kcp-go" name = "github.com/xtaci/kcp-go"
version = "3.19.0" version = "3.23.0"
[[constraint]] [[constraint]]
name = "github.com/xtaci/smux" name = "github.com/xtaci/smux"
@ -111,7 +119,7 @@
[[constraint]] [[constraint]]
name = "go.uber.org/atomic" name = "go.uber.org/atomic"
version = "1.2.0" version = "1.3.1"
[[constraint]] [[constraint]]
branch = "master" branch = "master"
@ -123,12 +131,8 @@
[[constraint]] [[constraint]]
name = "google.golang.org/grpc" name = "google.golang.org/grpc"
version = "1.6.0" version = "1.9.0"
[[constraint]] [[constraint]]
name = "gopkg.in/alecthomas/kingpin.v2" name = "gopkg.in/alecthomas/kingpin.v2"
version = "2.2.5" version = "2.2.6"
[[constraint]]
branch = "master"
name = "github.com/mmatczuk/go-http-tunnel"

View File

@ -4,10 +4,9 @@ before_install:
- go get github.com/stretchr/testify - go get github.com/stretchr/testify
go: go:
- 1.5
- 1.6
- 1.7 - 1.7
- 1.8 - 1.8
- 1.9
- tip - tip
script: script:

View File

@ -1,11 +1,10 @@
# Storm # Storm
[![Join the chat at https://gitter.im/asdine/storm](https://badges.gitter.im/asdine/storm.svg)](https://gitter.im/asdine/storm?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Build Status](https://travis-ci.org/asdine/storm.svg)](https://travis-ci.org/asdine/storm) [![Build Status](https://travis-ci.org/asdine/storm.svg)](https://travis-ci.org/asdine/storm)
[![GoDoc](https://godoc.org/github.com/asdine/storm?status.svg)](https://godoc.org/github.com/asdine/storm) [![GoDoc](https://godoc.org/github.com/asdine/storm?status.svg)](https://godoc.org/github.com/asdine/storm)
[![Go Report Card](https://goreportcard.com/badge/github.com/asdine/storm)](https://goreportcard.com/report/github.com/asdine/storm) [![Go Report Card](https://goreportcard.com/badge/github.com/asdine/storm)](https://goreportcard.com/report/github.com/asdine/storm)
Storm is a simple and powerful toolkit for [BoltDB](https://github.com/boltdb/bolt). Basically, Storm provides indexes, a wide range of methods to store and fetch data, an advanced query system, and much more. Storm is a simple and powerful toolkit for [BoltDB](https://github.com/coreos/bbolt). Basically, Storm provides indexes, a wide range of methods to store and fetch data, an advanced query system, and much more.
In addition to the examples below, see also the [examples in the GoDoc](https://godoc.org/github.com/asdine/storm#pkg-examples). In addition to the examples below, see also the [examples in the GoDoc](https://godoc.org/github.com/asdine/storm#pkg-examples).
@ -43,7 +42,6 @@ In addition to the examples below, see also the [examples in the GoDoc](https://
- [Node options](#node-options) - [Node options](#node-options)
- [Simple Key/Value store](#simple-keyvalue-store) - [Simple Key/Value store](#simple-keyvalue-store)
- [BoltDB](#boltdb) - [BoltDB](#boltdb)
- [Migrations](#migrations)
- [License](#license) - [License](#license)
- [Credits](#credits) - [Credits](#credits)
@ -62,6 +60,7 @@ import "github.com/asdine/storm"
## Open a database ## Open a database
Quick way of opening a database Quick way of opening a database
```go ```go
db, err := storm.Open("my.db") db, err := storm.Open("my.db")
@ -103,11 +102,11 @@ type Base struct {
} }
type User struct { type User struct {
Base `storm:"inline"` Base `storm:"inline"`
Group string `storm:"index"` Group string `storm:"index"`
Email string `storm:"unique"` Email string `storm:"unique"`
Name string Name string
CreatedAt time.Time `storm:"index"` CreatedAt time.Time `storm:"index"`
} }
``` ```
@ -142,11 +141,11 @@ Storm can auto increment integer values so you don't have to worry about that wh
```go ```go
type Product struct { type Product struct {
Pk int `storm:"id,increment"` // primary key with auto increment Pk int `storm:"id,increment"` // primary key with auto increment
Name string Name string
IntegerField uint64 `storm:"increment"` IntegerField uint64 `storm:"increment"`
IndexedIntegerField uint32 `storm:"index,increment"` IndexedIntegerField uint32 `storm:"index,increment"`
UniqueIntegerField int16 `storm:"unique,increment=100"` // the starting value can be set UniqueIntegerField int16 `storm:"unique,increment=100"` // the starting value can be set
} }
p := Product{Name: "Vaccum Cleaner"} p := Product{Name: "Vaccum Cleaner"}
@ -175,7 +174,7 @@ fmt.Println(p.UniqueIntegerField)
### Simple queries ### Simple queries
Any object can be fetched, indexed or not. Storm uses indexes when available, otherwhise it uses the [query system](#advanced-queries). Any object can be fetched, indexed or not. Storm uses indexes when available, otherwise it uses the [query system](#advanced-queries).
#### Fetch one object #### Fetch one object
@ -433,6 +432,7 @@ if err != nil {
return tx.Commit() return tx.Commit()
``` ```
### Options ### Options
Storm options are functions that can be passed when constructing you Storm instance. You can pass it any number of options. Storm options are functions that can be passed when constructing you Storm instance. You can pass it any number of options.
@ -462,12 +462,12 @@ These can be used by importing the relevant package and use that codec to config
```go ```go
import ( import (
"github.com/asdine/storm" "github.com/asdine/storm"
"github.com/asdine/storm/codec/gob" "github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json" "github.com/asdine/storm/codec/json"
"github.com/asdine/storm/codec/sereal" "github.com/asdine/storm/codec/sereal"
"github.com/asdine/storm/codec/protobuf" "github.com/asdine/storm/codec/protobuf"
"github.com/asdine/storm/codec/msgpack" "github.com/asdine/storm/codec/msgpack"
) )
var gobDb, _ = storm.Open("gob.db", storm.Codec(gob.Codec)) var gobDb, _ = storm.Open("gob.db", storm.Codec(gob.Codec))
@ -490,7 +490,7 @@ db := storm.Open("my.db", storm.UseDB(bDB))
#### Batch mode #### Batch mode
Batch mode can be enabled to speed up concurrent writes (see [Batch read-write transactions](https://github.com/boltdb/bolt#batch-read-write-transactions)) Batch mode can be enabled to speed up concurrent writes (see [Batch read-write transactions](https://github.com/coreos/bbolt#batch-read-write-transactions))
```go ```go
db := storm.Open("my.db", storm.Batch()) db := storm.Open("my.db", storm.Batch())
@ -546,16 +546,19 @@ n := db.From("my-node")
``` ```
Give a bolt.Tx transaction to the Node Give a bolt.Tx transaction to the Node
```go ```go
n = n.WithTransaction(tx) n = n.WithTransaction(tx)
``` ```
Enable batch mode Enable batch mode
```go ```go
n = n.WithBatch(true) n = n.WithBatch(true)
``` ```
Use a Codec Use a Codec
```go ```go
n = n.WithCodec(gob.Codec) n = n.WithCodec(gob.Codec)
``` ```
@ -566,6 +569,7 @@ Storm can be used as a simple, robust, key/value store that can store anything.
The key and the value can be of any type as long as the key is not a zero value. The key and the value can be of any type as long as the key is not a zero value.
Saving data : Saving data :
```go ```go
db.Set("logs", time.Now(), "I'm eating my breakfast man") db.Set("logs", time.Now(), "I'm eating my breakfast man")
db.Set("sessions", bson.NewObjectId(), &someUser) db.Set("sessions", bson.NewObjectId(), &someUser)
@ -576,6 +580,7 @@ db.Set("weird storage", "754-3010", map[string]interface{}{
``` ```
Fetching data : Fetching data :
```go ```go
user := User{} user := User{}
db.Get("sessions", someObjectId, &user) db.Get("sessions", someObjectId, &user)
@ -587,6 +592,7 @@ db.Get("sessions", someObjectId, &details)
``` ```
Deleting data : Deleting data :
```go ```go
db.Delete("sessions", someObjectId) db.Delete("sessions", someObjectId)
db.Delete("weird storage", "754-3010") db.Delete("weird storage", "754-3010")
@ -617,11 +623,6 @@ db.Bolt.Update(func(tx *bolt.Tx) error {
}) })
``` ```
## Migrations
You can use the migration tool to migrate databases that use older version of Storm.
See this [README](https://github.com/asdine/storm-migrator) for more informations.
## License ## License
MIT MIT

View File

@ -7,7 +7,7 @@ import (
) )
func BenchmarkFindWithIndex(b *testing.B) { func BenchmarkFindWithIndex(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement()) db, cleanup := createDB(b)
defer cleanup() defer cleanup()
var users []User var users []User
@ -37,7 +37,7 @@ func BenchmarkFindWithIndex(b *testing.B) {
} }
func BenchmarkFindWithoutIndex(b *testing.B) { func BenchmarkFindWithoutIndex(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement()) db, cleanup := createDB(b)
defer cleanup() defer cleanup()
var users []User var users []User
@ -67,7 +67,7 @@ func BenchmarkFindWithoutIndex(b *testing.B) {
} }
func BenchmarkOneWithIndex(b *testing.B) { func BenchmarkOneWithIndex(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement()) db, cleanup := createDB(b)
defer cleanup() defer cleanup()
var u User var u User
@ -89,7 +89,7 @@ func BenchmarkOneWithIndex(b *testing.B) {
} }
func BenchmarkOneByID(b *testing.B) { func BenchmarkOneByID(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement()) db, cleanup := createDB(b)
defer cleanup() defer cleanup()
type User struct { type User struct {
@ -120,7 +120,7 @@ func BenchmarkOneByID(b *testing.B) {
} }
func BenchmarkOneWithoutIndex(b *testing.B) { func BenchmarkOneWithoutIndex(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement()) db, cleanup := createDB(b)
defer cleanup() defer cleanup()
var u User var u User
@ -142,7 +142,7 @@ func BenchmarkOneWithoutIndex(b *testing.B) {
} }
func BenchmarkSave(b *testing.B) { func BenchmarkSave(b *testing.B) {
db, cleanup := createDB(b, AutoIncrement()) db, cleanup := createDB(b)
defer cleanup() defer cleanup()
w := User{Name: "John"} w := User{Name: "John"}

View File

@ -1,6 +1,6 @@
package storm package storm
import "github.com/boltdb/bolt" import "github.com/coreos/bbolt"
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't // CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist. // already exist.

View File

@ -16,9 +16,9 @@ func TestBucket(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
require.Nil(t, db.root.GetBucket(readTx, "none")) require.Nil(t, db.GetBucket(readTx, "none"))
b, err := db.root.CreateBucketIfNotExists(readTx, "new") b, err := db.CreateBucketIfNotExists(readTx, "new")
// Cannot create buckets in a read transaction // Cannot create buckets in a read transaction
require.Error(t, err) require.Error(t, err)
@ -36,9 +36,9 @@ func TestBucket(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
require.Nil(t, db.root.GetBucket(writeTx, "none")) require.Nil(t, db.GetBucket(writeTx, "none"))
b, err = db.root.CreateBucketIfNotExists(writeTx, "new") b, err = db.CreateBucketIfNotExists(writeTx, "new")
require.NoError(t, err) require.NoError(t, err)
require.NotNil(t, b) require.NotNil(t, b)
@ -59,8 +59,8 @@ func TestBucket(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
require.NotNil(t, db.root.GetBucket(readTx, "new")) require.NotNil(t, db.GetBucket(readTx, "new"))
require.Nil(t, db.root.GetBucket(readTx, "c")) require.Nil(t, db.GetBucket(readTx, "c"))
require.NotNil(t, n2.GetBucket(readTx, "c")) require.NotNil(t, n2.GetBucket(readTx, "c"))
readTx.Rollback() readTx.Rollback()

View File

@ -43,9 +43,6 @@ var (
// ErrNotInTransaction is returned when trying to rollback or commit when not in transaction. // ErrNotInTransaction is returned when trying to rollback or commit when not in transaction.
ErrNotInTransaction = errors.New("not in transaction") ErrNotInTransaction = errors.New("not in transaction")
// ErrUnAddressable is returned when a struct or an exported field of a struct is unaddressable
ErrUnAddressable = errors.New("unaddressable value")
// ErrIncompatibleValue is returned when trying to set a value with a different type than the chosen field // ErrIncompatibleValue is returned when trying to set a value with a different type than the chosen field
ErrIncompatibleValue = errors.New("incompatible value") ErrIncompatibleValue = errors.New("incompatible value")

View File

@ -10,7 +10,8 @@ import (
"time" "time"
"github.com/asdine/storm" "github.com/asdine/storm"
"github.com/boltdb/bolt" "github.com/asdine/storm/codec/gob"
"github.com/coreos/bbolt"
) )
func ExampleDB_Save() { func ExampleDB_Save() {
@ -18,7 +19,7 @@ func ExampleDB_Save() {
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
type User struct { type User struct {
ID int `storm:"id"` ID int `storm:"id,increment"` // the increment tag will auto-increment integer IDs without existing values.
Group string `storm:"index"` Group string `storm:"index"`
Email string `storm:"unique"` Email string `storm:"unique"`
Name string Name string
@ -27,8 +28,7 @@ func ExampleDB_Save() {
} }
// Open takes an optional list of options as the last argument. // Open takes an optional list of options as the last argument.
// AutoIncrement will auto-increment integer IDs without existing values. db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.Codec(gob.Codec))
db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.AutoIncrement())
defer db.Close() defer db.Close()
user := User{ user := User{
@ -497,7 +497,7 @@ func ExampleNode_RangeScan() {
} }
type User struct { type User struct {
ID int `storm:"id"` ID int `storm:"id,increment"`
Group string `storm:"index"` Group string `storm:"index"`
Email string `storm:"unique"` Email string `storm:"unique"`
Name string Name string
@ -506,7 +506,7 @@ type User struct {
} }
type Account struct { type Account struct {
ID int `storm:"id"` ID int `storm:"id,increment"`
Amount int64 // amount in cents Amount int64 // amount in cents
} }
@ -517,7 +517,7 @@ type Note struct {
func prepareDB() (string, *storm.DB) { func prepareDB() (string, *storm.DB) {
dir, _ := ioutil.TempDir(os.TempDir(), "storm") dir, _ := ioutil.TempDir(os.TempDir(), "storm")
db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.AutoIncrement()) db, _ := storm.Open(filepath.Join(dir, "storm.db"))
for i, name := range []string{"John", "Eric", "Dilbert"} { for i, name := range []string{"John", "Eric", "Dilbert"} {
email := strings.ToLower(name + "@provider.com") email := strings.ToLower(name + "@provider.com")

View File

@ -7,7 +7,7 @@ import (
"strings" "strings"
"github.com/asdine/storm/index" "github.com/asdine/storm/index"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// Storm tags // Storm tags

View File

@ -6,11 +6,11 @@ import (
"github.com/asdine/storm/index" "github.com/asdine/storm/index"
"github.com/asdine/storm/q" "github.com/asdine/storm/q"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// A Finder can fetch types from BoltDB // A finder can fetch types from BoltDB
type Finder interface { type finder interface {
// One returns one record by the specified index // One returns one record by the specified index
One(fieldName string, value interface{}, to interface{}) error One(fieldName string, value interface{}, to interface{}) error
@ -79,7 +79,7 @@ func (n *node) One(fieldName string, value interface{}, to interface{}) error {
return sink.flush() return sink.flush()
} }
val, err := toBytes(value, n.s.codec) val, err := toBytes(value, n.codec)
if err != nil { if err != nil {
return err return err
} }
@ -119,7 +119,7 @@ func (n *node) one(tx *bolt.Tx, bucketName, fieldName string, cfg *structConfig,
return ErrNotFound return ErrNotFound
} }
return n.s.codec.Unmarshal(raw, to) return n.codec.Unmarshal(raw, to)
} }
// Find returns one or more records by the specified index // Find returns one or more records by the specified index
@ -164,7 +164,7 @@ func (n *node) Find(fieldName string, value interface{}, to interface{}, options
return sink.flush() return sink.flush()
} }
val, err := toBytes(value, n.s.codec) val, err := toBytes(value, n.codec)
if err != nil { if err != nil {
return err return err
} }
@ -280,7 +280,7 @@ func (n *node) allByIndex(tx *bolt.Tx, fieldName string, cfg *structConfig, ref
return ErrNotFound return ErrNotFound
} }
err = n.s.codec.Unmarshal(raw, results.Index(i).Addr().Interface()) err = n.codec.Unmarshal(raw, results.Index(i).Addr().Interface())
if err != nil { if err != nil {
return err return err
} }
@ -359,12 +359,12 @@ func (n *node) Range(fieldName string, min, max, to interface{}, options ...func
return sink.flush() return sink.flush()
} }
mn, err := toBytes(min, n.s.codec) mn, err := toBytes(min, n.codec)
if err != nil { if err != nil {
return err return err
} }
mx, err := toBytes(max, n.s.codec) mx, err := toBytes(max, n.codec)
if err != nil { if err != nil {
return err return err
} }
@ -450,7 +450,7 @@ func (n *node) Prefix(fieldName string, prefix string, to interface{}, options .
return sink.flush() return sink.flush()
} }
prfx, err := toBytes(prefix, n.s.codec) prfx, err := toBytes(prefix, n.codec)
if err != nil { if err != nil {
return err return err
} }

View File

@ -8,7 +8,7 @@ import (
"testing" "testing"
"time" "time"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -110,7 +110,7 @@ func TestFind(t *testing.T) {
} }
func TestFindNil(t *testing.T) { func TestFindNil(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement()) db, cleanup := createDB(t)
defer cleanup() defer cleanup()
type User struct { type User struct {
@ -150,11 +150,11 @@ func TestFindNil(t *testing.T) {
} }
func TestFindIntIndex(t *testing.T) { func TestFindIntIndex(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement()) db, cleanup := createDB(t)
defer cleanup() defer cleanup()
type Score struct { type Score struct {
ID int ID int `storm:"increment"`
Score uint64 `storm:"index"` Score uint64 `storm:"index"`
} }

View File

@ -4,7 +4,7 @@ import (
"bytes" "bytes"
"github.com/asdine/storm/internal" "github.com/asdine/storm/internal"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// NewListIndex loads a ListIndex // NewListIndex loads a ListIndex

View File

@ -11,7 +11,7 @@ import (
"github.com/asdine/storm" "github.com/asdine/storm"
"github.com/asdine/storm/codec/gob" "github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/index" "github.com/asdine/storm/index"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View File

@ -4,7 +4,7 @@ import (
"bytes" "bytes"
"github.com/asdine/storm/internal" "github.com/asdine/storm/internal"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// NewUniqueIndex loads a UniqueIndex // NewUniqueIndex loads a UniqueIndex

View File

@ -10,7 +10,7 @@ import (
"github.com/asdine/storm" "github.com/asdine/storm"
"github.com/asdine/storm/codec/gob" "github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/index" "github.com/asdine/storm/index"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View File

@ -3,7 +3,7 @@ package internal
import ( import (
"bytes" "bytes"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// Cursor that can be reversed // Cursor that can be reversed

18
vendor/github.com/asdine/storm/kv.go generated vendored
View File

@ -3,11 +3,11 @@ package storm
import ( import (
"reflect" "reflect"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// KeyValueStore can store and fetch values by key // keyValueStore can store and fetch values by key
type KeyValueStore interface { type keyValueStore interface {
// Get a value from a bucket // Get a value from a bucket
Get(bucketName string, key interface{}, to interface{}) error Get(bucketName string, key interface{}, to interface{}) error
// Set a key/value pair into a bucket // Set a key/value pair into a bucket
@ -22,7 +22,7 @@ type KeyValueStore interface {
// GetBytes gets a raw value from a bucket. // GetBytes gets a raw value from a bucket.
func (n *node) GetBytes(bucketName string, key interface{}) ([]byte, error) { func (n *node) GetBytes(bucketName string, key interface{}) ([]byte, error) {
id, err := toBytes(key, n.s.codec) id, err := toBytes(key, n.codec)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -61,7 +61,7 @@ func (n *node) SetBytes(bucketName string, key interface{}, value []byte) error
return ErrNilParam return ErrNilParam
} }
id, err := toBytes(key, n.s.codec) id, err := toBytes(key, n.codec)
if err != nil { if err != nil {
return err return err
} }
@ -94,7 +94,7 @@ func (n *node) Get(bucketName string, key interface{}, to interface{}) error {
return ErrPtrNeeded return ErrPtrNeeded
} }
id, err := toBytes(key, n.s.codec) id, err := toBytes(key, n.codec)
if err != nil { if err != nil {
return err return err
} }
@ -105,7 +105,7 @@ func (n *node) Get(bucketName string, key interface{}, to interface{}) error {
return err return err
} }
return n.s.codec.Unmarshal(raw, to) return n.codec.Unmarshal(raw, to)
}) })
} }
@ -114,7 +114,7 @@ func (n *node) Set(bucketName string, key interface{}, value interface{}) error
var data []byte var data []byte
var err error var err error
if value != nil { if value != nil {
data, err = n.s.codec.Marshal(value) data, err = n.codec.Marshal(value)
if err != nil { if err != nil {
return err return err
} }
@ -125,7 +125,7 @@ func (n *node) Set(bucketName string, key interface{}, value interface{}) error
// Delete deletes a key from a bucket // Delete deletes a key from a bucket
func (n *node) Delete(bucketName string, key interface{}) error { func (n *node) Delete(bucketName string, key interface{}) error {
id, err := toBytes(key, n.s.codec) id, err := toBytes(key, n.codec)
if err != nil { if err != nil {
return err return err
} }

View File

@ -7,7 +7,7 @@ import (
"github.com/asdine/storm/codec/gob" "github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json" "github.com/asdine/storm/codec/json"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )

View File

@ -3,7 +3,7 @@ package storm
import ( import (
"reflect" "reflect"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
const ( const (

View File

@ -2,15 +2,15 @@ package storm
import ( import (
"github.com/asdine/storm/codec" "github.com/asdine/storm/codec"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// A Node in Storm represents the API to a BoltDB bucket. // A Node in Storm represents the API to a BoltDB bucket.
type Node interface { type Node interface {
Tx tx
TypeStore typeStore
KeyValueStore keyValueStore
BucketScanner bucketScanner
// From returns a new Storm node with a new bucket root below the current. // From returns a new Storm node with a new bucket root below the current.
// All DB operations on the new node will be executed relative to this bucket. // All DB operations on the new node will be executed relative to this bucket.
From(addend ...string) Node From(addend ...string) Node

View File

@ -5,7 +5,7 @@ import (
"github.com/asdine/storm/codec/gob" "github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json" "github.com/asdine/storm/codec/json"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -17,13 +17,13 @@ func TestNode(t *testing.T) {
node1, ok := n1.(*node) node1, ok := n1.(*node)
require.True(t, ok) require.True(t, ok)
require.Equal(t, db, node1.s) require.Equal(t, db, node1.s)
require.NotEqual(t, db.root, n1) require.NotEqual(t, db.Node, n1)
require.Equal(t, []string{"a"}, db.root.rootBucket) require.Equal(t, []string{"a"}, db.Node.(*node).rootBucket)
require.Equal(t, []string{"b", "c"}, node1.rootBucket) require.Equal(t, []string{"a", "b", "c"}, node1.rootBucket)
n2 := n1.From("d", "e") n2 := n1.From("d", "e")
node2, ok := n2.(*node) node2, ok := n2.(*node)
require.True(t, ok) require.True(t, ok)
require.Equal(t, []string{"b", "c", "d", "e"}, node2.rootBucket) require.Equal(t, []string{"a", "b", "c", "d", "e"}, node2.rootBucket)
} }
func TestNodeWithTransaction(t *testing.T) { func TestNodeWithTransaction(t *testing.T) {
@ -46,17 +46,90 @@ func TestNodeWithTransaction(t *testing.T) {
} }
func TestNodeWithCodec(t *testing.T) { func TestNodeWithCodec(t *testing.T) {
db, cleanup := createDB(t) t.Run("Inheritance", func(t *testing.T) {
defer cleanup() db, cleanup := createDB(t)
defer cleanup()
n := db.From("a").(*node) n := db.From("a").(*node)
require.Equal(t, json.Codec, n.codec) require.Equal(t, json.Codec, n.codec)
n = n.From("b", "c", "d").(*node) n = n.From("b", "c", "d").(*node)
require.Equal(t, json.Codec, n.codec) require.Equal(t, json.Codec, n.codec)
n = db.WithCodec(gob.Codec).(*node) n = db.WithCodec(gob.Codec).(*node)
n = n.From("e").(*node) n = n.From("e").(*node)
require.Equal(t, gob.Codec, n.codec) require.Equal(t, gob.Codec, n.codec)
o := n.From("f").WithCodec(json.Codec).(*node) o := n.From("f").WithCodec(json.Codec).(*node)
require.Equal(t, gob.Codec, n.codec) require.Equal(t, gob.Codec, n.codec)
require.Equal(t, json.Codec, o.codec) require.Equal(t, json.Codec, o.codec)
})
t.Run("CodecCall", func(t *testing.T) {
db, cleanup := createDB(t)
defer cleanup()
type User struct {
ID int
Name string `storm:"index"`
}
requireBytesEqual := func(raw []byte, expected interface{}) {
var u User
err := gob.Codec.Unmarshal(raw, &u)
require.NoError(t, err)
require.Equal(t, expected, u)
}
n := db.From("a").WithCodec(gob.Codec)
err := n.Set("gobBucket", "key", &User{ID: 10, Name: "John"})
require.NoError(t, err)
b, err := n.GetBytes("gobBucket", "key")
require.NoError(t, err)
requireBytesEqual(b, User{ID: 10, Name: "John"})
id, err := toBytes(10, n.(*node).codec)
require.NoError(t, err)
err = n.Save(&User{ID: 10, Name: "John"})
require.NoError(t, err)
b, err = n.GetBytes("User", id)
require.NoError(t, err)
requireBytesEqual(b, User{ID: 10, Name: "John"})
err = n.Update(&User{ID: 10, Name: "Jack"})
require.NoError(t, err)
b, err = n.GetBytes("User", id)
require.NoError(t, err)
requireBytesEqual(b, User{ID: 10, Name: "Jack"})
err = n.UpdateField(&User{ID: 10}, "Name", "John")
require.NoError(t, err)
b, err = n.GetBytes("User", id)
require.NoError(t, err)
requireBytesEqual(b, User{ID: 10, Name: "John"})
var users []User
err = n.Find("Name", "John", &users)
require.NoError(t, err)
var user User
err = n.One("Name", "John", &user)
require.NoError(t, err)
err = n.AllByIndex("Name", &users)
require.NoError(t, err)
err = n.All(&users)
require.NoError(t, err)
err = n.Range("Name", "J", "K", &users)
require.NoError(t, err)
err = n.Prefix("Name", "J", &users)
require.NoError(t, err)
_, err = n.Count(new(User))
require.NoError(t, err)
err = n.Select().Find(&users)
require.NoError(t, err)
})
} }

View File

@ -5,57 +5,48 @@ import (
"github.com/asdine/storm/codec" "github.com/asdine/storm/codec"
"github.com/asdine/storm/index" "github.com/asdine/storm/index"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// BoltOptions used to pass options to BoltDB. // BoltOptions used to pass options to BoltDB.
func BoltOptions(mode os.FileMode, options *bolt.Options) func(*DB) error { func BoltOptions(mode os.FileMode, options *bolt.Options) func(*Options) error {
return func(d *DB) error { return func(opts *Options) error {
d.boltMode = mode opts.boltMode = mode
d.boltOptions = options opts.boltOptions = options
return nil return nil
} }
} }
// Codec used to set a custom encoder and decoder. The default is JSON. // Codec used to set a custom encoder and decoder. The default is JSON.
func Codec(c codec.MarshalUnmarshaler) func(*DB) error { func Codec(c codec.MarshalUnmarshaler) func(*Options) error {
return func(d *DB) error { return func(opts *Options) error {
d.codec = c opts.codec = c
return nil return nil
} }
} }
// Batch enables the use of batch instead of update for read-write transactions. // Batch enables the use of batch instead of update for read-write transactions.
func Batch() func(*DB) error { func Batch() func(*Options) error {
return func(d *DB) error { return func(opts *Options) error {
d.batchMode = true opts.batchMode = true
return nil
}
}
// AutoIncrement used to enable bolt.NextSequence on empty integer ids.
// Deprecated: Set the increment tag to the id field instead.
func AutoIncrement() func(*DB) error {
return func(d *DB) error {
d.autoIncrement = true
return nil return nil
} }
} }
// Root used to set the root bucket. See also the From method. // Root used to set the root bucket. See also the From method.
func Root(root ...string) func(*DB) error { func Root(root ...string) func(*Options) error {
return func(d *DB) error { return func(opts *Options) error {
d.rootBucket = root opts.rootBucket = root
return nil return nil
} }
} }
// UseDB allow Storm to use an existing open Bolt.DB. // UseDB allows Storm to use an existing open Bolt.DB.
// Warning: storm.DB.Close() will close the bolt.DB instance. // Warning: storm.DB.Close() will close the bolt.DB instance.
func UseDB(b *bolt.DB) func(*DB) error { func UseDB(b *bolt.DB) func(*Options) error {
return func(d *DB) error { return func(opts *Options) error {
d.Path = b.Path() opts.path = b.Path()
d.Bolt = b opts.bolt = b
return nil return nil
} }
} }
@ -80,3 +71,27 @@ func Reverse() func(*index.Options) {
opts.Reverse = true opts.Reverse = true
} }
} }
// Options are used to customize the way Storm opens a database.
type Options struct {
// Handles encoding and decoding of objects
codec codec.MarshalUnmarshaler
// Bolt file mode
boltMode os.FileMode
// Bolt options
boltOptions *bolt.Options
// Enable batch mode for read-write transaction, instead of update mode
batchMode bool
// The root bucket name
rootBucket []string
// Path of the database file
path string
// Bolt is still easily accessible
bolt *bolt.DB
}

View File

@ -37,7 +37,7 @@ func ExampleRe() {
} }
type User struct { type User struct {
ID int `storm:"id"` ID int `storm:"id,increment"`
Group string `storm:"index"` Group string `storm:"index"`
Email string `storm:"unique"` Email string `storm:"unique"`
Name string Name string
@ -47,7 +47,7 @@ type User struct {
func prepareDB() (string, *storm.DB) { func prepareDB() (string, *storm.DB) {
dir, _ := ioutil.TempDir(os.TempDir(), "storm") dir, _ := ioutil.TempDir(os.TempDir(), "storm")
db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.AutoIncrement()) db, _ := storm.Open(filepath.Join(dir, "storm.db"))
for i, name := range []string{"John", "Norm", "Donald", "Eric", "Dilbert"} { for i, name := range []string{"John", "Norm", "Donald", "Eric", "Dilbert"} {
email := strings.ToLower(name + "@provider.com") email := strings.ToLower(name + "@provider.com")

View File

@ -3,7 +3,7 @@ package storm
import ( import (
"github.com/asdine/storm/internal" "github.com/asdine/storm/internal"
"github.com/asdine/storm/q" "github.com/asdine/storm/q"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// Select a list of records that match a list of matchers. Doesn't use indexes. // Select a list of records that match a list of matchers. Doesn't use indexes.

View File

@ -10,12 +10,12 @@ import (
) )
type Score struct { type Score struct {
ID int ID int `storm:"increment"`
Value int Value int
} }
func prepareScoreDB(t *testing.T) (*DB, func()) { func prepareScoreDB(t *testing.T) (*DB, func()) {
db, cleanup := createDB(t, AutoIncrement()) db, cleanup := createDB(t)
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
err := db.Save(&Score{ err := db.Save(&Score{
@ -492,7 +492,7 @@ func TestSelectCount(t *testing.T) {
} }
func TestSelectRaw(t *testing.T) { func TestSelectRaw(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement(), Codec(json.Codec)) db, cleanup := createDB(t, Codec(json.Codec))
defer cleanup() defer cleanup()
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {
@ -520,7 +520,7 @@ func TestSelectRaw(t *testing.T) {
} }
func TestSelectEach(t *testing.T) { func TestSelectEach(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement(), Codec(json.Codec)) db, cleanup := createDB(t, Codec(json.Codec))
defer cleanup() defer cleanup()
for i := 0; i < 20; i++ { for i := 0; i < 20; i++ {

View File

@ -3,11 +3,11 @@ package storm
import ( import (
"bytes" "bytes"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// A BucketScanner scans a Node for a list of buckets // A bucketScanner scans a Node for a list of buckets
type BucketScanner interface { type bucketScanner interface {
// PrefixScan scans the root buckets for keys matching the given prefix. // PrefixScan scans the root buckets for keys matching the given prefix.
PrefixScan(prefix string) []Node PrefixScan(prefix string) []Node
// PrefixScan scans the buckets in this node for keys matching the given prefix. // PrefixScan scans the buckets in this node for keys matching the given prefix.

View File

@ -6,7 +6,7 @@ import (
"github.com/asdine/storm/index" "github.com/asdine/storm/index"
"github.com/asdine/storm/q" "github.com/asdine/storm/q"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
type item struct { type item struct {

View File

@ -6,12 +6,12 @@ import (
"github.com/asdine/storm/index" "github.com/asdine/storm/index"
"github.com/asdine/storm/q" "github.com/asdine/storm/q"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
) )
// TypeStore stores user defined types in BoltDB // typeStore stores user defined types in BoltDB
type TypeStore interface { type typeStore interface {
Finder finder
// Init creates the indexes and buckets for a given structure // Init creates the indexes and buckets for a given structure
Init(data interface{}) error Init(data interface{}) error
@ -32,10 +32,6 @@ type TypeStore interface {
// DeleteStruct deletes a structure from the associated bucket // DeleteStruct deletes a structure from the associated bucket
DeleteStruct(data interface{}) error DeleteStruct(data interface{}) error
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
Remove(data interface{}) error
} }
// Init creates the indexes and buckets for a given structure // Init creates the indexes and buckets for a given structure
@ -152,7 +148,7 @@ func (n *node) Save(data interface{}) error {
} }
if cfg.ID.IsZero { if cfg.ID.IsZero {
if !cfg.ID.IsInteger || (!n.s.autoIncrement && !cfg.ID.Increment) { if !cfg.ID.IsInteger || !cfg.ID.Increment {
return ErrZeroID return ErrZeroID
} }
} }
@ -181,7 +177,7 @@ func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, update boo
} }
} }
id, err := toBytes(cfg.ID.Value.Interface(), n.s.codec) id, err := toBytes(cfg.ID.Value.Interface(), n.codec)
if err != nil { if err != nil {
return err return err
} }
@ -215,7 +211,7 @@ func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, update boo
continue continue
} }
value, err := toBytes(fieldCfg.Value.Interface(), n.s.codec) value, err := toBytes(fieldCfg.Value.Interface(), n.codec)
if err != nil { if err != nil {
return err return err
} }
@ -250,7 +246,7 @@ func (n *node) save(tx *bolt.Tx, cfg *structConfig, data interface{}, update boo
} }
} }
raw, err := n.s.codec.Marshal(data) raw, err := n.codec.Marshal(data)
if err != nil { if err != nil {
return err return err
} }
@ -385,7 +381,7 @@ func (n *node) DeleteStruct(data interface{}) error {
return err return err
} }
id, err := toBytes(cfg.ID.Value.Interface(), n.s.codec) id, err := toBytes(cfg.ID.Value.Interface(), n.codec)
if err != nil { if err != nil {
return err return err
} }
@ -427,9 +423,3 @@ func (n *node) deleteStruct(tx *bolt.Tx, cfg *structConfig, id []byte) error {
return bucket.Delete(id) return bucket.Delete(id)
} }
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
func (n *node) Remove(data interface{}) error {
return n.DeleteStruct(data)
}

View File

@ -9,7 +9,7 @@ import (
"github.com/asdine/storm/codec/gob" "github.com/asdine/storm/codec/gob"
"github.com/asdine/storm/codec/json" "github.com/asdine/storm/codec/json"
"github.com/asdine/storm/q" "github.com/asdine/storm/q"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -154,7 +154,7 @@ func TestSave(t *testing.T) {
val := bucket.Get(i) val := bucket.Get(i)
require.NotNil(t, val) require.NotNil(t, val)
content, err := db.codec.Marshal(&v) content, err := db.Codec().Marshal(&v)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, content, val) require.Equal(t, content, val)
return nil return nil
@ -312,43 +312,8 @@ func TestSaveEmptyValues(t *testing.T) {
require.Error(t, err) require.Error(t, err)
} }
func TestSaveAutoIncrement(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement())
defer cleanup()
for i := 1; i < 10; i++ {
s := SimpleUser{Name: "John"}
err := db.Save(&s)
require.NoError(t, err)
require.Equal(t, i, s.ID)
}
u := UserWithUint64IDField{Name: "John"}
err := db.Save(&u)
require.NoError(t, err)
require.Equal(t, uint64(1), u.ID)
v := UserWithUint64IDField{}
err = db.One("ID", uint64(1), &v)
require.NoError(t, err)
require.Equal(t, u, v)
ui := UserWithIDField{Name: "John"}
err = db.Save(&ui)
require.NoError(t, err)
require.Equal(t, 1, ui.ID)
vi := UserWithIDField{}
err = db.One("ID", 1, &vi)
require.NoError(t, err)
require.Equal(t, ui, vi)
us := UserWithStringIDField{Name: "John"}
err = db.Save(&us)
require.Error(t, err)
require.Equal(t, ErrZeroID, err)
}
func TestSaveIncrement(t *testing.T) { func TestSaveIncrement(t *testing.T) {
db, cleanup := createDB(t, AutoIncrement()) db, cleanup := createDB(t)
defer cleanup() defer cleanup()
type User struct { type User struct {
@ -382,7 +347,7 @@ func TestSaveDifferentBucketRoot(t *testing.T) {
db, cleanup := createDB(t) db, cleanup := createDB(t)
defer cleanup() defer cleanup()
require.Len(t, db.rootBucket, 0) require.Len(t, db.Node.(*node).rootBucket, 0)
dbSub := db.From("sub").(*node) dbSub := db.From("sub").(*node)
@ -413,10 +378,9 @@ func TestSaveDifferentBucketRoot(t *testing.T) {
func TestSaveEmbedded(t *testing.T) { func TestSaveEmbedded(t *testing.T) {
db, cleanup := createDB(t) db, cleanup := createDB(t)
defer cleanup() defer cleanup()
AutoIncrement()(db)
type Base struct { type Base struct {
ID int `storm:"id"` ID int `storm:"id,increment"`
} }
type User struct { type User struct {

View File

@ -3,14 +3,11 @@ package storm
import ( import (
"bytes" "bytes"
"encoding/binary" "encoding/binary"
"os"
"time" "time"
"github.com/asdine/storm/codec" "github.com/asdine/storm/codec"
"github.com/asdine/storm/codec/json" "github.com/asdine/storm/codec/json"
"github.com/asdine/storm/index" "github.com/coreos/bbolt"
"github.com/asdine/storm/q"
"github.com/boltdb/bolt"
) )
const ( const (
@ -22,95 +19,65 @@ const (
var defaultCodec = json.Codec var defaultCodec = json.Codec
// Open opens a database at the given path with optional Storm options. // Open opens a database at the given path with optional Storm options.
func Open(path string, stormOptions ...func(*DB) error) (*DB, error) { func Open(path string, stormOptions ...func(*Options) error) (*DB, error) {
var err error var err error
s := &DB{ var opts Options
Path: path,
codec: defaultCodec,
}
for _, option := range stormOptions { for _, option := range stormOptions {
if err = option(s); err != nil { if err = option(&opts); err != nil {
return nil, err return nil, err
} }
} }
if s.boltMode == 0 { s := DB{
s.boltMode = 0600 Bolt: opts.bolt,
} }
if s.boltOptions == nil { n := node{
s.boltOptions = &bolt.Options{Timeout: 1 * time.Second} s: &s,
codec: opts.codec,
batchMode: opts.batchMode,
rootBucket: opts.rootBucket,
} }
s.root = &node{s: s, rootBucket: s.rootBucket, codec: s.codec, batchMode: s.batchMode} if n.codec == nil {
n.codec = defaultCodec
}
if opts.boltMode == 0 {
opts.boltMode = 0600
}
if opts.boltOptions == nil {
opts.boltOptions = &bolt.Options{Timeout: 1 * time.Second}
}
s.Node = &n
// skip if UseDB option is used // skip if UseDB option is used
if s.Bolt == nil { if s.Bolt == nil {
s.Bolt, err = bolt.Open(path, s.boltMode, s.boltOptions) s.Bolt, err = bolt.Open(path, opts.boltMode, opts.boltOptions)
if err != nil {
return nil, err
}
err = s.checkVersion()
if err != nil { if err != nil {
return nil, err return nil, err
} }
} }
return s, nil err = s.checkVersion()
if err != nil {
return nil, err
}
return &s, nil
} }
// DB is the wrapper around BoltDB. It contains an instance of BoltDB and uses it to perform all the // DB is the wrapper around BoltDB. It contains an instance of BoltDB and uses it to perform all the
// needed operations // needed operations
type DB struct { type DB struct {
// Path of the database file // The root node that points to the root bucket.
Path string Node
// Handles encoding and decoding of objects
codec codec.MarshalUnmarshaler
// Bolt is still easily accessible // Bolt is still easily accessible
Bolt *bolt.DB Bolt *bolt.DB
// Bolt file mode
boltMode os.FileMode
// Bolt options
boltOptions *bolt.Options
// Enable auto increment on empty integer fields
autoIncrement bool
// The root node that points to the root bucket.
root *node
// The root bucket name
rootBucket []string
// Enable batch mode for read-write transaction, instead of update mode
batchMode bool
}
// From returns a new Storm node with a new bucket root.
// All DB operations on the new node will be executed relative to the given
// bucket.
func (s *DB) From(root ...string) Node {
newNode := *s.root
newNode.rootBucket = root
return &newNode
}
// WithTransaction returns a New Storm node that will use the given transaction.
func (s *DB) WithTransaction(tx *bolt.Tx) Node {
return s.root.WithTransaction(tx)
}
// Bucket returns the root bucket name as a slice.
// In the normal, simple case this will be empty.
func (s *DB) Bucket() []string {
return s.root.Bucket()
} }
// Close the database // Close the database
@ -118,167 +85,6 @@ func (s *DB) Close() error {
return s.Bolt.Close() return s.Bolt.Close()
} }
// Codec returns the EncodeDecoder used by this instance of Storm
func (s *DB) Codec() codec.MarshalUnmarshaler {
return s.codec
}
// WithCodec returns a New Storm Node that will use the given Codec.
func (s *DB) WithCodec(codec codec.MarshalUnmarshaler) Node {
n := s.From().(*node)
n.codec = codec
return n
}
// WithBatch returns a new Storm Node with the batch mode enabled.
func (s *DB) WithBatch(enabled bool) Node {
n := s.From().(*node)
n.batchMode = enabled
return n
}
// Get a value from a bucket
func (s *DB) Get(bucketName string, key interface{}, to interface{}) error {
return s.root.Get(bucketName, key, to)
}
// Set a key/value pair into a bucket
func (s *DB) Set(bucketName string, key interface{}, value interface{}) error {
return s.root.Set(bucketName, key, value)
}
// Delete deletes a key from a bucket
func (s *DB) Delete(bucketName string, key interface{}) error {
return s.root.Delete(bucketName, key)
}
// GetBytes gets a raw value from a bucket.
func (s *DB) GetBytes(bucketName string, key interface{}) ([]byte, error) {
return s.root.GetBytes(bucketName, key)
}
// SetBytes sets a raw value into a bucket.
func (s *DB) SetBytes(bucketName string, key interface{}, value []byte) error {
return s.root.SetBytes(bucketName, key, value)
}
// Save a structure
func (s *DB) Save(data interface{}) error {
return s.root.Save(data)
}
// PrefixScan scans the root buckets for keys matching the given prefix.
func (s *DB) PrefixScan(prefix string) []Node {
return s.root.PrefixScan(prefix)
}
// RangeScan scans the root buckets over a range such as a sortable time range.
func (s *DB) RangeScan(min, max string) []Node {
return s.root.RangeScan(min, max)
}
// Select a list of records that match a list of matchers. Doesn't use indexes.
func (s *DB) Select(matchers ...q.Matcher) Query {
return s.root.Select(matchers...)
}
// Range returns one or more records by the specified index within the specified range
func (s *DB) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
return s.root.Range(fieldName, min, max, to, options...)
}
// Prefix returns one or more records whose given field starts with the specified prefix.
func (s *DB) Prefix(fieldName string, prefix string, to interface{}, options ...func(*index.Options)) error {
return s.root.Prefix(fieldName, prefix, to, options...)
}
// AllByIndex gets all the records of a bucket that are indexed in the specified index
func (s *DB) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
return s.root.AllByIndex(fieldName, to, options...)
}
// All get all the records of a bucket
func (s *DB) All(to interface{}, options ...func(*index.Options)) error {
return s.root.All(to, options...)
}
// Count counts all the records of a bucket
func (s *DB) Count(data interface{}) (int, error) {
return s.root.Count(data)
}
// DeleteStruct deletes a structure from the associated bucket
func (s *DB) DeleteStruct(data interface{}) error {
return s.root.DeleteStruct(data)
}
// Remove deletes a structure from the associated bucket
// Deprecated: Use DeleteStruct instead.
func (s *DB) Remove(data interface{}) error {
return s.root.DeleteStruct(data)
}
// Drop a bucket
func (s *DB) Drop(data interface{}) error {
return s.root.Drop(data)
}
// Find returns one or more records by the specified index
func (s *DB) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
return s.root.Find(fieldName, value, to, options...)
}
// Init creates the indexes and buckets for a given structure
func (s *DB) Init(data interface{}) error {
return s.root.Init(data)
}
// ReIndex rebuilds all the indexes of a bucket
func (s *DB) ReIndex(data interface{}) error {
return s.root.ReIndex(data)
}
// One returns one record by the specified index
func (s *DB) One(fieldName string, value interface{}, to interface{}) error {
return s.root.One(fieldName, value, to)
}
// Begin starts a new transaction.
func (s *DB) Begin(writable bool) (Node, error) {
return s.root.Begin(writable)
}
// Rollback closes the transaction and ignores all previous updates.
func (s *DB) Rollback() error {
return s.root.Rollback()
}
// Commit writes all changes to disk.
func (s *DB) Commit() error {
return s.root.Rollback()
}
// Update a structure
func (s *DB) Update(data interface{}) error {
return s.root.Update(data)
}
// UpdateField updates a single field
func (s *DB) UpdateField(data interface{}, fieldName string, value interface{}) error {
return s.root.UpdateField(data, fieldName, value)
}
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
// already exist.
func (s *DB) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
return s.root.CreateBucketIfNotExists(tx, bucket)
}
// GetBucket returns the given bucket below the current node.
func (s *DB) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
return s.root.GetBucket(tx, children...)
}
func (s *DB) checkVersion() error { func (s *DB) checkVersion() error {
var v string var v string
err := s.Get(dbinfo, "version", &v) err := s.Get(dbinfo, "version", &v)
@ -286,8 +92,9 @@ func (s *DB) checkVersion() error {
return err return err
} }
// for now, we only set the current version if it doesn't exist or if v0.5.0 // for now, we only set the current version if it doesn't exist.
if v == "" || v == "0.5.0" || v == "0.6.0" { // v1 and v2 database files are compatible.
if v == "" {
return s.Set(dbinfo, "version", Version) return s.Set(dbinfo, "version", Version)
} }

View File

@ -12,7 +12,7 @@ import (
"time" "time"
"github.com/asdine/storm/codec/json" "github.com/asdine/storm/codec/json"
"github.com/boltdb/bolt" "github.com/coreos/bbolt"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
) )
@ -33,7 +33,6 @@ func TestNewStorm(t *testing.T) {
require.Implements(t, (*Node)(nil), db) require.Implements(t, (*Node)(nil), db)
require.NoError(t, err) require.NoError(t, err)
require.Equal(t, file, db.Path)
require.NotNil(t, db.Bolt) require.NotNil(t, db.Bolt)
require.Equal(t, defaultCodec, db.Codec()) require.Equal(t, defaultCodec, db.Codec())
@ -48,13 +47,9 @@ func TestNewStormWithStormOptions(t *testing.T) {
defer os.RemoveAll(dir) defer os.RemoveAll(dir)
dc := new(dummyCodec) dc := new(dummyCodec)
db1, _ := Open(filepath.Join(dir, "storm1.db"), BoltOptions(0660, &bolt.Options{Timeout: 10 * time.Second}), Codec(dc), AutoIncrement(), Root("a", "b")) db1, _ := Open(filepath.Join(dir, "storm1.db"), BoltOptions(0660, &bolt.Options{Timeout: 10 * time.Second}), Codec(dc), Root("a", "b"))
require.Equal(t, dc, db1.Codec()) require.Equal(t, dc, db1.Codec())
require.True(t, db1.autoIncrement) require.Equal(t, []string{"a", "b"}, db1.Node.(*node).rootBucket)
require.Equal(t, os.FileMode(0660), db1.boltMode)
require.Equal(t, 10*time.Second, db1.boltOptions.Timeout)
require.Equal(t, []string{"a", "b"}, db1.rootBucket)
require.Equal(t, []string{"a", "b"}, db1.root.rootBucket)
err := db1.Save(&SimpleUser{ID: 1}) err := db1.Save(&SimpleUser{ID: 1})
require.NoError(t, err) require.NoError(t, err)
@ -70,7 +65,7 @@ func TestNewStormWithBatch(t *testing.T) {
db1, _ := Open(filepath.Join(dir, "storm1.db"), Batch()) db1, _ := Open(filepath.Join(dir, "storm1.db"), Batch())
defer db1.Close() defer db1.Close()
require.True(t, db1.root.batchMode) require.True(t, db1.Node.(*node).batchMode)
n := db1.From().(*node) n := db1.From().(*node)
require.True(t, n.batchMode) require.True(t, n.batchMode)
n = db1.WithBatch(true).(*node) n = db1.WithBatch(true).(*node)
@ -163,7 +158,7 @@ func TestToBytes(t *testing.T) {
} }
} }
func createDB(t errorHandler, opts ...func(*DB) error) (*DB, func()) { func createDB(t errorHandler, opts ...func(*Options) error) (*DB, func()) {
dir, err := ioutil.TempDir(os.TempDir(), "storm") dir, err := ioutil.TempDir(os.TempDir(), "storm")
if err != nil { if err != nil {
t.Error(err) t.Error(err)

View File

@ -1,9 +1,9 @@
package storm package storm
import "github.com/boltdb/bolt" import "github.com/coreos/bbolt"
// Tx is a transaction // tx is a transaction
type Tx interface { type tx interface {
// Commit writes all changes to disk. // Commit writes all changes to disk.
Commit() error Commit() error

View File

@ -1,4 +1,4 @@
package storm package storm
// Version of Storm // Version of Storm
const Version = "1.0.0" const Version = "2.0.0"

View File

@ -77,6 +77,8 @@ func (hm HandshakeMessage) ToBody() (HandshakeMessageBody, error) {
body = new(ClientHelloBody) body = new(ClientHelloBody)
case HandshakeTypeServerHello: case HandshakeTypeServerHello:
body = new(ServerHelloBody) body = new(ServerHelloBody)
case HandshakeTypeHelloRetryRequest:
body = new(HelloRetryRequestBody)
case HandshakeTypeEncryptedExtensions: case HandshakeTypeEncryptedExtensions:
body = new(EncryptedExtensionsBody) body = new(EncryptedExtensionsBody)
case HandshakeTypeCertificate: case HandshakeTypeCertificate:

View File

@ -120,6 +120,29 @@ func (ch ClientHelloBody) Truncated() ([]byte, error) {
return chData[:chLen-binderLen], nil return chData[:chLen-binderLen], nil
} }
// struct {
// ProtocolVersion server_version;
// CipherSuite cipher_suite;
// Extension extensions<2..2^16-1>;
// } HelloRetryRequest;
type HelloRetryRequestBody struct {
Version uint16
CipherSuite CipherSuite
Extensions ExtensionList `tls:"head=2,min=2"`
}
func (hrr HelloRetryRequestBody) Type() HandshakeType {
return HandshakeTypeHelloRetryRequest
}
func (hrr HelloRetryRequestBody) Marshal() ([]byte, error) {
return syntax.Marshal(hrr)
}
func (hrr *HelloRetryRequestBody) Unmarshal(data []byte) (int, error) {
return syntax.Unmarshal(data, hrr)
}
// struct { // struct {
// ProtocolVersion legacy_version = 0x0303; /* TLS v1.2 */ // ProtocolVersion legacy_version = 0x0303; /* TLS v1.2 */
// Random random; // Random random;

View File

@ -79,6 +79,16 @@ var (
}, },
} }
// HelloRetryRequest test cases
hrrValidIn = HelloRetryRequestBody{
Version: supportedVersion,
CipherSuite: 0x0001,
Extensions: extListValidIn,
}
hrrEmptyIn = HelloRetryRequestBody{}
hrrValidHex = supportedVersionHex + "0001" + extListValidHex
hrrEmptyHex = supportedVersionHex + "0001" + "0000"
// ServerHello test cases // ServerHello test cases
shValidIn = ServerHelloBody{ shValidIn = ServerHelloBody{
Version: tls12Version, Version: tls12Version,
@ -342,6 +352,34 @@ func TestClientHelloTruncate(t *testing.T) {
assertError(t, err, "Truncated a ClientHello with a mal-formed PSK") assertError(t, err, "Truncated a ClientHello with a mal-formed PSK")
} }
func TestHelloRetryRequestMarshalUnmarshal(t *testing.T) {
hrrValid := unhex(hrrValidHex)
hrrEmpty := unhex(hrrEmptyHex)
// Test correctness of handshake type
assertEquals(t, (HelloRetryRequestBody{}).Type(), HandshakeTypeHelloRetryRequest)
// Test successful marshal
out, err := hrrValidIn.Marshal()
assertNotError(t, err, "Failed to marshal a valid HelloRetryRequest")
assertByteEquals(t, out, hrrValid)
// Test marshal failure with no extensions present
out, err = hrrEmptyIn.Marshal()
assertError(t, err, "Marshaled HelloRetryRequest with no extensions")
// Test successful unmarshal
var hrr HelloRetryRequestBody
read, err := hrr.Unmarshal(hrrValid)
assertNotError(t, err, "Failed to unmarshal a valid HelloRetryRequest")
assertEquals(t, read, len(hrrValid))
assertDeepEquals(t, hrr, hrrValidIn)
// Test unmarshal failure with no extensions present
read, err = hrr.Unmarshal(hrrEmpty)
assertError(t, err, "Unmarshaled a HelloRetryRequest with no extensions")
}
func TestServerHelloMarshalUnmarshal(t *testing.T) { func TestServerHelloMarshalUnmarshal(t *testing.T) {
shValid := unhex(shValidHex) shValid := unhex(shValidHex)
shEmpty := unhex(shEmptyHex) shEmpty := unhex(shEmptyHex)

View File

@ -1,28 +0,0 @@
package bolt
import "unsafe"
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned bool
func init() {
// Simple check to see whether this arch handles unaligned load/stores
// correctly.
// ARM9 and older devices require load/stores to be from/to aligned
// addresses. If not, the lower 2 bits are cleared and that address is
// read in a jumbled up order.
// See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html
raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11}
val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2))
brokenUnaligned = val != 0x11222211
}

View File

@ -15,11 +15,11 @@ and setting values. That's it.
## Project Status ## Project Status
Bolt is stable, the API is fixed, and the file format is fixed. Full unit Bolt is stable and the API is fixed. Full unit test coverage and randomized
test coverage and randomized black box testing are used to ensure database black box testing are used to ensure database consistency and thread safety.
consistency and thread safety. Bolt is currently used in high-load production Bolt is currently in high-load production environments serving databases as
environments serving databases as large as 1TB. Many companies such as large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
Shopify and Heroku use Bolt-backed services every day. services every day.
## Table of Contents ## Table of Contents
@ -209,7 +209,7 @@ and then safely close your transaction if an error is returned. This is the
recommended way to use Bolt transactions. recommended way to use Bolt transactions.
However, sometimes you may want to manually start and end your transactions. However, sometimes you may want to manually start and end your transactions.
You can use the `DB.Begin()` function directly but **please** be sure to close You can use the `Tx.Begin()` function directly but **please** be sure to close
the transaction. the transaction.
```go ```go
@ -395,7 +395,7 @@ db.View(func(tx *bolt.Tx) error {
c := tx.Bucket([]byte("MyBucket")).Cursor() c := tx.Bucket([]byte("MyBucket")).Cursor()
prefix := []byte("1234") prefix := []byte("1234")
for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() { for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
fmt.Printf("key=%s, value=%s\n", k, v) fmt.Printf("key=%s, value=%s\n", k, v)
} }
@ -448,10 +448,6 @@ db.View(func(tx *bolt.Tx) error {
}) })
``` ```
Please note that keys and values in `ForEach()` are only valid while
the transaction is open. If you need to use a key or value outside of
the transaction, you must use `copy()` to copy it to another byte
slice.
### Nested buckets ### Nested buckets
@ -464,55 +460,6 @@ func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
func (*Bucket) DeleteBucket(key []byte) error func (*Bucket) DeleteBucket(key []byte) error
``` ```
Say you had a multi-tenant application where the root level bucket was the account bucket. Inside of this bucket was a sequence of accounts which themselves are buckets. And inside the sequence bucket you could have many buckets pertaining to the Account itself (Users, Notes, etc) isolating the information into logical groupings.
```go
// createUser creates a new user in the given account.
func createUser(accountID int, u *User) error {
// Start the transaction.
tx, err := db.Begin(true)
if err != nil {
return err
}
defer tx.Rollback()
// Retrieve the root bucket for the account.
// Assume this has already been created when the account was set up.
root := tx.Bucket([]byte(strconv.FormatUint(accountID, 10)))
// Setup the users bucket.
bkt, err := root.CreateBucketIfNotExists([]byte("USERS"))
if err != nil {
return err
}
// Generate an ID for the new user.
userID, err := bkt.NextSequence()
if err != nil {
return err
}
u.ID = userID
// Marshal and save the encoded user.
if buf, err := json.Marshal(u); err != nil {
return err
} else if err := bkt.Put([]byte(strconv.FormatUint(u.ID, 10)), buf); err != nil {
return err
}
// Commit the transaction.
if err := tx.Commit(); err != nil {
return err
}
return nil
}
```
### Database backups ### Database backups
@ -768,9 +715,6 @@ Here are a few things to note when evaluating and using Bolt:
can be reused by a new page or can be unmapped from virtual memory and you'll can be reused by a new page or can be unmapped from virtual memory and you'll
see an `unexpected fault address` panic when accessing it. see an `unexpected fault address` panic when accessing it.
* Bolt uses an exclusive write lock on the database file so it cannot be
shared by multiple processes.
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for * Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
buckets that have random inserts will cause your database to have very poor buckets that have random inserts will cause your database to have very poor
page utilization. page utilization.
@ -904,13 +848,5 @@ Below is a list of public, open source projects that use Bolt:
* [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend. * [Algernon](https://github.com/xyproto/algernon) - A HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files. * [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it uses the high-performance HTTPRouter. * [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang and BoltDB for persistent key/value storage and for routing it uses the high-performance HTTPRouter.
* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
* [gopherpit](https://github.com/gopherpit/gopherpit) - A web service to manage Go remote import paths with custom domains
* [bolter](https://github.com/hasit/bolter) - Command-line app for viewing BoltDB file in your terminal.
* [btcwallet](https://github.com/btcsuite/btcwallet) - A bitcoin wallet.
* [dcrwallet](https://github.com/decred/dcrwallet) - A wallet for the Decred cryptocurrency.
* [Ironsmith](https://github.com/timshannon/ironsmith) - A simple, script-driven continuous integration (build - > test -> release) tool, with no external dependencies
* [BoltHold](https://github.com/timshannon/bolthold) - An embeddable NoSQL store for Go types built on BoltDB
* [Ponzu CMS](https://ponzu-cms.org) - Headless CMS + automatic JSON API with auto-HTTPS, HTTP/2 Server Push, and flexible server framework.
If you are using Bolt in a project please send a pull request to add it to the list. If you are using Bolt in a project please send a pull request to add it to the list.

View File

@ -5,6 +5,3 @@ const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers. // maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF const maxAllocSize = 0xFFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -5,6 +5,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers. // maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

7
vendor/github.com/coreos/bbolt/bolt_arm.go generated vendored Normal file
View File

@ -0,0 +1,7 @@
package bolt
// maxMapSize represents the largest mmap size supported by Bolt.
const maxMapSize = 0x7FFFFFFF // 2GB
// maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0xFFFFFFF

View File

@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers. // maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers. // maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers. // maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -7,6 +7,3 @@ const maxMapSize = 0xFFFFFFFFFFFF // 256TB
// maxAllocSize is the size used when creating array pointers. // maxAllocSize is the size used when creating array pointers.
const maxAllocSize = 0x7FFFFFFF const maxAllocSize = 0x7FFFFFFF
// Are unaligned load/stores broken on this arch?
var brokenUnaligned = false

View File

@ -89,7 +89,7 @@ func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) erro
func funlock(db *DB) error { func funlock(db *DB) error {
err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{})
db.lockfile.Close() db.lockfile.Close()
os.Remove(db.path + lockExt) os.Remove(db.path+lockExt)
return err return err
} }

View File

@ -130,17 +130,9 @@ func (b *Bucket) Bucket(name []byte) *Bucket {
func (b *Bucket) openBucket(value []byte) *Bucket { func (b *Bucket) openBucket(value []byte) *Bucket {
var child = newBucket(b.tx) var child = newBucket(b.tx)
// If unaligned load/stores are broken on this arch and value is
// unaligned simply clone to an aligned byte array.
unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0
if unaligned {
value = cloneBytes(value)
}
// If this is a writable transaction then we need to copy the bucket entry. // If this is a writable transaction then we need to copy the bucket entry.
// Read-only transactions can point directly at the mmap entry. // Read-only transactions can point directly at the mmap entry.
if b.tx.writable && !unaligned { if b.tx.writable {
child.bucket = &bucket{} child.bucket = &bucket{}
*child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) *child.bucket = *(*bucket)(unsafe.Pointer(&value[0]))
} else { } else {
@ -175,8 +167,9 @@ func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) {
if bytes.Equal(key, k) { if bytes.Equal(key, k) {
if (flags & bucketLeafFlag) != 0 { if (flags & bucketLeafFlag) != 0 {
return nil, ErrBucketExists return nil, ErrBucketExists
} else {
return nil, ErrIncompatibleValue
} }
return nil, ErrIncompatibleValue
} }
// Create empty, inline bucket. // Create empty, inline bucket.
@ -336,28 +329,6 @@ func (b *Bucket) Delete(key []byte) error {
return nil return nil
} }
// Sequence returns the current integer for the bucket without incrementing it.
func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
// SetSequence updates the sequence number for the bucket.
func (b *Bucket) SetSequence(v uint64) error {
if b.tx.db == nil {
return ErrTxClosed
} else if !b.Writable() {
return ErrTxNotWritable
}
// Materialize the root node if it hasn't been already so that the
// bucket will be saved during commit.
if b.rootNode == nil {
_ = b.node(b.root, nil)
}
// Increment and return the sequence.
b.bucket.sequence = v
return nil
}
// NextSequence returns an autoincrementing integer for the bucket. // NextSequence returns an autoincrementing integer for the bucket.
func (b *Bucket) NextSequence() (uint64, error) { func (b *Bucket) NextSequence() (uint64, error) {
if b.tx.db == nil { if b.tx.db == nil {

View File

@ -782,48 +782,6 @@ func TestBucket_DeleteBucket_IncompatibleValue(t *testing.T) {
} }
} }
// Ensure bucket can set and update its sequence number.
func TestBucket_Sequence(t *testing.T) {
db := MustOpenDB()
defer db.MustClose()
if err := db.Update(func(tx *bolt.Tx) error {
bkt, err := tx.CreateBucket([]byte("0"))
if err != nil {
t.Fatal(err)
}
// Retrieve sequence.
if v := bkt.Sequence(); v != 0 {
t.Fatalf("unexpected sequence: %d", v)
}
// Update sequence.
if err := bkt.SetSequence(1000); err != nil {
t.Fatal(err)
}
// Read sequence again.
if v := bkt.Sequence(); v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
// Verify sequence in separate transaction.
if err := db.View(func(tx *bolt.Tx) error {
if v := tx.Bucket([]byte("0")).Sequence(); v != 1000 {
t.Fatalf("unexpected sequence: %d", v)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
// Ensure that a bucket can return an autoincrementing sequence. // Ensure that a bucket can return an autoincrementing sequence.
func TestBucket_NextSequence(t *testing.T) { func TestBucket_NextSequence(t *testing.T) {
db := MustOpenDB() db := MustOpenDB()

View File

@ -552,10 +552,7 @@ func (db *DB) removeTx(tx *Tx) {
// Remove the transaction. // Remove the transaction.
for i, t := range db.txs { for i, t := range db.txs {
if t == tx { if t == tx {
last := len(db.txs) - 1 db.txs = append(db.txs[:i], db.txs[i+1:]...)
db.txs[i] = db.txs[last]
db.txs[last] = nil
db.txs = db.txs[:last]
break break
} }
} }
@ -955,7 +952,7 @@ func (s *Stats) Sub(other *Stats) Stats {
diff.PendingPageN = s.PendingPageN diff.PendingPageN = s.PendingPageN
diff.FreeAlloc = s.FreeAlloc diff.FreeAlloc = s.FreeAlloc
diff.FreelistInuse = s.FreelistInuse diff.FreelistInuse = s.FreelistInuse
diff.TxN = s.TxN - other.TxN diff.TxN = other.TxN - s.TxN
diff.TxStats = s.TxStats.Sub(&other.TxStats) diff.TxStats = s.TxStats.Sub(&other.TxStats)
return diff return diff
} }

View File

@ -12,6 +12,7 @@ import (
"os" "os"
"path/filepath" "path/filepath"
"regexp" "regexp"
"runtime"
"sort" "sort"
"strings" "strings"
"sync" "sync"
@ -179,6 +180,69 @@ func TestOpen_ErrChecksum(t *testing.T) {
} }
} }
// Ensure that opening an already open database file will timeout.
func TestOpen_Timeout(t *testing.T) {
if runtime.GOOS == "solaris" {
t.Skip("solaris fcntl locks don't support intra-process locking")
}
path := tempfile()
// Open a data file.
db0, err := bolt.Open(path, 0666, nil)
if err != nil {
t.Fatal(err)
} else if db0 == nil {
t.Fatal("expected database")
}
// Attempt to open the database again.
start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 100 * time.Millisecond})
if err != bolt.ErrTimeout {
t.Fatalf("unexpected timeout: %s", err)
} else if db1 != nil {
t.Fatal("unexpected database")
} else if time.Since(start) <= 100*time.Millisecond {
t.Fatal("expected to wait at least timeout duration")
}
if err := db0.Close(); err != nil {
t.Fatal(err)
}
}
// Ensure that opening an already open database file will wait until its closed.
func TestOpen_Wait(t *testing.T) {
if runtime.GOOS == "solaris" {
t.Skip("solaris fcntl locks don't support intra-process locking")
}
path := tempfile()
// Open a data file.
db0, err := bolt.Open(path, 0666, nil)
if err != nil {
t.Fatal(err)
}
// Close it in just a bit.
time.AfterFunc(100*time.Millisecond, func() { _ = db0.Close() })
// Attempt to open the database again.
start := time.Now()
db1, err := bolt.Open(path, 0666, &bolt.Options{Timeout: 200 * time.Millisecond})
if err != nil {
t.Fatal(err)
} else if time.Since(start) <= 100*time.Millisecond {
t.Fatal("expected to wait at least timeout duration")
}
if err := db1.Close(); err != nil {
t.Fatal(err)
}
}
// Ensure that opening a database does not increase its size. // Ensure that opening a database does not increase its size.
// https://github.com/boltdb/bolt/issues/291 // https://github.com/boltdb/bolt/issues/291
func TestOpen_Size(t *testing.T) { func TestOpen_Size(t *testing.T) {
@ -362,6 +426,103 @@ func TestOpen_FileTooSmall(t *testing.T) {
} }
} }
// Ensure that a database can be opened in read-only mode by multiple processes
// and that a database can not be opened in read-write mode and in read-only
// mode at the same time.
func TestOpen_ReadOnly(t *testing.T) {
if runtime.GOOS == "solaris" {
t.Skip("solaris fcntl locks don't support intra-process locking")
}
bucket, key, value := []byte(`bucket`), []byte(`key`), []byte(`value`)
path := tempfile()
// Open in read-write mode.
db, err := bolt.Open(path, 0666, nil)
if err != nil {
t.Fatal(err)
} else if db.IsReadOnly() {
t.Fatal("db should not be in read only mode")
}
if err := db.Update(func(tx *bolt.Tx) error {
b, err := tx.CreateBucket(bucket)
if err != nil {
return err
}
if err := b.Put(key, value); err != nil {
t.Fatal(err)
}
return nil
}); err != nil {
t.Fatal(err)
}
if err := db.Close(); err != nil {
t.Fatal(err)
}
// Open in read-only mode.
db0, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
if err != nil {
t.Fatal(err)
}
// Opening in read-write mode should return an error.
if _, err = bolt.Open(path, 0666, &bolt.Options{Timeout: time.Millisecond * 100}); err == nil {
t.Fatal("expected error")
}
// And again (in read-only mode).
db1, err := bolt.Open(path, 0666, &bolt.Options{ReadOnly: true})
if err != nil {
t.Fatal(err)
}
// Verify both read-only databases are accessible.
for _, db := range []*bolt.DB{db0, db1} {
// Verify is is in read only mode indeed.
if !db.IsReadOnly() {
t.Fatal("expected read only mode")
}
// Read-only databases should not allow updates.
if err := db.Update(func(*bolt.Tx) error {
panic(`should never get here`)
}); err != bolt.ErrDatabaseReadOnly {
t.Fatalf("unexpected error: %s", err)
}
// Read-only databases should not allow beginning writable txns.
if _, err := db.Begin(true); err != bolt.ErrDatabaseReadOnly {
t.Fatalf("unexpected error: %s", err)
}
// Verify the data.
if err := db.View(func(tx *bolt.Tx) error {
b := tx.Bucket(bucket)
if b == nil {
return fmt.Errorf("expected bucket `%s`", string(bucket))
}
got := string(b.Get(key))
expected := string(value)
if got != expected {
return fmt.Errorf("expected `%s`, got `%s`", expected, got)
}
return nil
}); err != nil {
t.Fatal(err)
}
}
if err := db0.Close(); err != nil {
t.Fatal(err)
}
if err := db1.Close(); err != nil {
t.Fatal(err)
}
}
// TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough // TestDB_Open_InitialMmapSize tests if having InitialMmapSize large enough
// to hold data from concurrent write transaction resolves the issue that // to hold data from concurrent write transaction resolves the issue that
// read transaction blocks the write transaction and causes deadlock. // read transaction blocks the write transaction and causes deadlock.

View File

@ -24,12 +24,7 @@ func newFreelist() *freelist {
// size returns the size of the page after serialization. // size returns the size of the page after serialization.
func (f *freelist) size() int { func (f *freelist) size() int {
n := f.count() return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count())
if n >= 0xFFFF {
// The first element will be used to store the count. See freelist.write.
n++
}
return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * n)
} }
// count returns count of pages on the freelist // count returns count of pages on the freelist
@ -51,15 +46,16 @@ func (f *freelist) pending_count() int {
return count return count
} }
// copyall copies into dst a list of all free ids and all pending ids in one sorted list. // all returns a list of all free ids and all pending ids in one sorted list.
// f.count returns the minimum length required for dst. func (f *freelist) all() []pgid {
func (f *freelist) copyall(dst []pgid) { m := make(pgids, 0)
m := make(pgids, 0, f.pending_count())
for _, list := range f.pending { for _, list := range f.pending {
m = append(m, list...) m = append(m, list...)
} }
sort.Sort(m) sort.Sort(m)
mergepgids(dst, f.ids, m) return pgids(f.ids).merge(m)
} }
// allocate returns the starting page id of a contiguous list of pages of a given size. // allocate returns the starting page id of a contiguous list of pages of a given size.
@ -190,22 +186,22 @@ func (f *freelist) read(p *page) {
// become free. // become free.
func (f *freelist) write(p *page) error { func (f *freelist) write(p *page) error {
// Combine the old free pgids and pgids waiting on an open transaction. // Combine the old free pgids and pgids waiting on an open transaction.
ids := f.all()
// Update the header flag. // Update the header flag.
p.flags |= freelistPageFlag p.flags |= freelistPageFlag
// The page.count can only hold up to 64k elements so if we overflow that // The page.count can only hold up to 64k elements so if we overflow that
// number then we handle it by putting the size in the first element. // number then we handle it by putting the size in the first element.
lenids := f.count() if len(ids) == 0 {
if lenids == 0 { p.count = uint16(len(ids))
p.count = uint16(lenids) } else if len(ids) < 0xFFFF {
} else if lenids < 0xFFFF { p.count = uint16(len(ids))
p.count = uint16(lenids) copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids)
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:])
} else { } else {
p.count = 0xFFFF p.count = 0xFFFF
((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(lenids) ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids))
f.copyall(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:]) copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids)
} }
return nil return nil
@ -240,7 +236,7 @@ func (f *freelist) reload(p *page) {
// reindex rebuilds the free cache based on available and pending free lists. // reindex rebuilds the free cache based on available and pending free lists.
func (f *freelist) reindex() { func (f *freelist) reindex() {
f.cache = make(map[pgid]bool, len(f.ids)) f.cache = make(map[pgid]bool)
for _, id := range f.ids { for _, id := range f.ids {
f.cache[id] = true f.cache[id] = true
} }

View File

@ -145,33 +145,12 @@ func (a pgids) merge(b pgids) pgids {
// Return the opposite slice if one is nil. // Return the opposite slice if one is nil.
if len(a) == 0 { if len(a) == 0 {
return b return b
} } else if len(b) == 0 {
if len(b) == 0 {
return a return a
} }
merged := make(pgids, len(a)+len(b))
mergepgids(merged, a, b)
return merged
}
// mergepgids copies the sorted union of a and b into dst. // Create a list to hold all elements from both lists.
// If dst is too small, it panics. merged := make(pgids, 0, len(a)+len(b))
func mergepgids(dst, a, b pgids) {
if len(dst) < len(a)+len(b) {
panic(fmt.Errorf("mergepgids bad len %d < %d + %d", len(dst), len(a), len(b)))
}
// Copy in the opposite slice if one is nil.
if len(a) == 0 {
copy(dst, b)
return
}
if len(b) == 0 {
copy(dst, a)
return
}
// Merged will hold all elements from both lists.
merged := dst[:0]
// Assign lead to the slice with a lower starting value, follow to the higher value. // Assign lead to the slice with a lower starting value, follow to the higher value.
lead, follow := a, b lead, follow := a, b
@ -193,5 +172,7 @@ func mergepgids(dst, a, b pgids) {
} }
// Append what's left in follow. // Append what's left in follow.
_ = append(merged, follow...) merged = append(merged, follow...)
return merged
} }

View File

@ -50,17 +50,9 @@ func (t testdata) Less(i, j int) bool { return bytes.Compare(t[i].Key, t[j].Key)
func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value { func (t testdata) Generate(rand *rand.Rand, size int) reflect.Value {
n := rand.Intn(qmaxitems-1) + 1 n := rand.Intn(qmaxitems-1) + 1
items := make(testdata, n) items := make(testdata, n)
used := make(map[string]bool)
for i := 0; i < n; i++ { for i := 0; i < n; i++ {
item := &items[i] item := &items[i]
// Ensure that keys are unique by looping until we find one that we have not already used. item.Key = randByteSlice(rand, 1, qmaxksize)
for {
item.Key = randByteSlice(rand, 1, qmaxksize)
if !used[string(item.Key)] {
used[string(item.Key)] = true
break
}
}
item.Value = randByteSlice(rand, 0, qmaxvsize) item.Value = randByteSlice(rand, 0, qmaxvsize)
} }
return reflect.ValueOf(items) return reflect.ValueOf(items)

View File

@ -381,9 +381,7 @@ func (tx *Tx) Check() <-chan error {
func (tx *Tx) check(ch chan error) { func (tx *Tx) check(ch chan error) {
// Check if any pages are double freed. // Check if any pages are double freed.
freed := make(map[pgid]bool) freed := make(map[pgid]bool)
all := make([]pgid, tx.db.freelist.count()) for _, id := range tx.db.freelist.all() {
tx.db.freelist.copyall(all)
for _, id := range all {
if freed[id] { if freed[id] {
ch <- fmt.Errorf("page %d: already freed", id) ch <- fmt.Errorf("page %d: already freed", id)
} }

View File

@ -57,7 +57,7 @@ func (h *extensionHandlerClient) Send(hType mint.HandshakeType, el *mint.Extensi
func (h *extensionHandlerClient) Receive(hType mint.HandshakeType, el *mint.ExtensionList) error { func (h *extensionHandlerClient) Receive(hType mint.HandshakeType, el *mint.ExtensionList) error {
ext := &tlsExtensionBody{} ext := &tlsExtensionBody{}
found := el.Find(ext) found, _ := el.Find(ext)
if hType != mint.HandshakeTypeEncryptedExtensions && hType != mint.HandshakeTypeNewSessionTicket { if hType != mint.HandshakeTypeEncryptedExtensions && hType != mint.HandshakeTypeNewSessionTicket {
if found { if found {

View File

@ -66,7 +66,7 @@ func (h *extensionHandlerServer) Send(hType mint.HandshakeType, el *mint.Extensi
func (h *extensionHandlerServer) Receive(hType mint.HandshakeType, el *mint.ExtensionList) error { func (h *extensionHandlerServer) Receive(hType mint.HandshakeType, el *mint.ExtensionList) error {
ext := &tlsExtensionBody{} ext := &tlsExtensionBody{}
found := el.Find(ext) found, _ := el.Find(ext)
if hType != mint.HandshakeTypeClientHello { if hType != mint.HandshakeTypeClientHello {
if found { if found {

View File

@ -18,4 +18,11 @@ install: true
# don't call go test -v because we want to be able to only show t.Log output when # don't call go test -v because we want to be able to only show t.Log output when
# a test fails # a test fails
script: go test -tags CI -race $(go list ./... | grep -v /vendor/) script: go test -race $(go list ./... | grep -v /vendor/)
# run a test for every major OS
env:
- GOOS=linux
- GOOS=windows
- GOOS=darwin

View File

@ -22,16 +22,15 @@ or other trivial change.
## Dependency Management ## Dependency Management
Currently mage has no dependencies(!) outside the standard libary. Let's keep Currently mage has no dependencies(!). Let's try to keep it that way. Since
it that way. Since it's likely that mage will be vendored into a project, it's likely that mage will be vendored into a project, adding dependencies to
adding dependencies to mage adds dependencies to every project that uses mage. mage adds dependencies to every project that uses mage.
## Versions ## Versions
Please avoid using features of go and the stdlib that prevent mage from being Please try to avoid using features of go and the stdlib that prevent mage from
buildable with older versions of Go. The CI tests currently check that mage is being buildable with old versions of Go. Definitely avoid anything that
buildable with go 1.7 and later. You may build with whatever version you like, requires go 1.9.
but CI has the final say.
## Testing ## Testing

View File

@ -1,35 +1,9 @@
<p align="center"><img src="https://user-images.githubusercontent.com/3185864/32058716-5ee9b512-ba38-11e7-978a-287eb2a62743.png"/></p> <h1 align=center>mage</h1>
<p align="center"><img src="https://user-images.githubusercontent.com/3185864/31061203-6f6743dc-a6ec-11e7-9469-b8d667d9bc3f.png"/></p>
## About [![Build Status](https://travis-ci.org/magefile/mage.svg?branch=master)](https://travis-ci.org/magefile/mage) <p align="center">Mage is a make/rake-like build tool using Go.</p>
Mage is a make/rake-like build tool using Go. You write plain-old go functions, [![Build Status](https://travis-ci.org/magefile/mage.svg?branch=master)](https://travis-ci.org/magefile/mage)
and Mage automatically uses them as Makefile-like runnable targets.
## Installation
Mage has no dependencies outside the Go standard library, and builds with Go 1.7
and above (possibly even lower versions, but they're not regularly tested).
Install mage by running
```
go get -u -d github.com/magefile/mage
cd $GOPATH/src/github.com/magefile/mage
go run bootstrap.go
```
This will download the code into your GOPATH, and then run the bootstrap script
to build mage with version infomation embedded in it. A normal `go get`
(without -d) will build the binary correctly, but no version info will be
embedded. If you've done this, no worries, just go to
$GOPATH/src/github.com/magefile/mage and run `mage install` or `go run
bootstrap.go` and a new binary will be created with the correct version
information.
The mage binary will be created in your $GOPATH/bin directory.
You may also install a binary release from our
[releases](https://github.com/magefile/mage/releases) page.
## Demo ## Demo

View File

@ -1,19 +0,0 @@
//+build ignore
package main
import (
"os"
"github.com/magefile/mage/mage"
)
// This is a bootstrap builder, to build mage when you don't already *have* mage.
// Run it like
// go run bootstrap.go
// and it will install mage with all the right flags created for you.
func main() {
os.Args = []string{os.Args[0], "-v", "install"}
os.Exit(mage.Main())
}

View File

@ -1,31 +0,0 @@
//+build CI
package main
import (
"io/ioutil"
"os"
"path/filepath"
"runtime"
"testing"
)
func TestBootstrap(t *testing.T) {
dir, err := ioutil.TempDir("", "")
if err != nil {
t.Fatal(err)
}
defer os.RemoveAll(dir)
s, err := run("go", "run", "bootstrap.go")
if err != nil {
t.Fatal(s)
}
name := "mage"
if runtime.GOOS == "windows" {
name += ".exe"
}
if _, err := os.Stat(filepath.Join(os.Getenv("GOPATH"), "bin", name)); err != nil {
t.Fatal(err)
}
}

View File

@ -3,46 +3,23 @@
package main package main
import ( import (
"bytes"
"errors" "errors"
"fmt" "fmt"
"os" "os"
"path/filepath"
"runtime"
"strings"
"time" "time"
"github.com/magefile/mage/sh" "github.com/magefile/mage/sh"
) )
// Runs "go install" for mage. This generates the version info the binary. // Runs "go install" for mage. This generates the version info the binary.
func Install() error { func Build() error {
ldf, err := flags() ldf, err := flags()
if err != nil { if err != nil {
return err return err
} }
name := "mage" return sh.Run("go", "install", "-ldflags="+ldf, "github.com/magefile/mage")
if runtime.GOOS == "windows" {
name += ".exe"
}
gopath, err := sh.Output("go", "env", "GOPATH")
if err != nil {
return fmt.Errorf("can't determine GOPATH: %v", err)
}
paths := strings.Split(gopath, string([]rune{os.PathListSeparator}))
bin := filepath.Join(paths[0], "bin")
// specifically don't mkdirall, if you have an invalid gopath in the first
// place, that's not on us to fix.
if err := os.Mkdir(bin, 0700); err != nil && !os.IsExist(err) {
return fmt.Errorf("failed to create %q: %v", bin, err)
}
path := filepath.Join(bin, name)
// we use go build here because if someone built with go get, then `go
// install` turns into a no-op, and `go install -a` fails on people's
// machines that have go installed in a non-writeable directory (such as
// normal OS installs in /usr/bin)
return sh.RunV("go", "build", "-o", path, "-ldflags="+ldf, "github.com/magefile/mage")
} }
// Generates a new release. Expects the TAG environment variable to be set, // Generates a new release. Expects the TAG environment variable to be set,
@ -83,8 +60,9 @@ func flags() (string, error) {
// tag returns the git tag for the current branch or "" if none. // tag returns the git tag for the current branch or "" if none.
func tag() string { func tag() string {
s, _ := sh.Output("git", "describe", "--tags") buf := &bytes.Buffer{}
return s _, _ = sh.Exec(nil, buf, nil, "git", "describe", "--tags")
return buf.String()
} }
// hash returns the git hash for the current repo or "" if none. // hash returns the git hash for the current repo or "" if none.