dependencies

parent fcef86d441
commit 6b17c2f576
@@ -0,0 +1,68 @@
{
	"version": 0,
	"dependencies": [
		{
			"importpath": "github.com/Sereal/Sereal/Go/sereal",
			"repository": "https://github.com/Sereal/Sereal",
			"revision": "cf4a7a728a6056d9cbb112e24093ac09fb2de069",
			"branch": "master",
			"path": "/Go/sereal"
		},
		{
			"importpath": "github.com/Xe/Tetra/1459",
			"repository": "https://github.com/Xe/Tetra",
			"revision": "dd27f12d43838c0232b720941390f709991e0121",
			"branch": "master",
			"path": "/1459"
		},
		{
			"importpath": "github.com/asdine/storm",
			"repository": "https://github.com/asdine/storm",
			"revision": "d63bd29719e3c74c43790d8dd5a5a2bf78310332",
			"branch": "master"
		},
		{
			"importpath": "github.com/boltdb/bolt",
			"repository": "https://github.com/boltdb/bolt",
			"revision": "dfb21201d9270c1082d5fb0f07f500311ff72f18",
			"branch": "master"
		},
		{
			"importpath": "github.com/dchest/blake2b",
			"repository": "https://github.com/dchest/blake2b",
			"revision": "3c8c640cd7bea3ca78209d812b5854442ab92fed",
			"branch": "master"
		},
		{
			"importpath": "github.com/fatih/structs",
			"repository": "https://github.com/fatih/structs",
			"revision": "3fe2facc32a7fbde4b29c0f85604dc1dd22836d2",
			"branch": "master"
		},
		{
			"importpath": "github.com/golang/protobuf/proto",
			"repository": "https://github.com/golang/protobuf",
			"revision": "9e6977f30c91c78396e719e164e57f9287fff42c",
			"branch": "master",
			"path": "/proto"
		},
		{
			"importpath": "github.com/golang/snappy",
			"repository": "https://github.com/golang/snappy",
			"revision": "d6668316e43571d7dde95be6fd077f96de002f8b",
			"branch": "master"
		},
		{
			"importpath": "github.com/technoweenie/multipartstreamer",
			"repository": "https://github.com/technoweenie/multipartstreamer",
			"revision": "a90a01d73ae432e2611d178c18367fbaa13e0154",
			"branch": "master"
		},
		{
			"importpath": "gopkg.in/telegram-bot-api.v4",
			"repository": "https://gopkg.in/telegram-bot-api.v4",
			"revision": "217764ba453f319b00d05506e56d26ef0a9fc231",
			"branch": "master"
		}
	]
}
@@ -0,0 +1,6 @@
# This is the official list of Go Sereal authors for copyright purposes.
Damian Gryski <damian@gryski.com>
Gareth Kirwan <gbjk@thermeon.com>
Ivan Kruglov <ivan.kruglov@yahoo.com>
Maxim Vuets <maxim.vuets@gmail.com>
Yuval Kogman <nothingmuch@woobling.org>
@@ -0,0 +1,23 @@
Copyright (c) 2013-2016 The Go Sereal Authors
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
@@ -0,0 +1,41 @@
CORPUS_PROTO_VER ?= 3
CORPUS_COMPRESS ?= SRL_UNCOMPRESSED

test_all: test compat

test: test_dir
	-go test

compat: test_dir
	-rm -f test_dir/test_data_*-go.out
	-rm -f test_freeze/*-go.out
	go test -test.run=TestCorpus
	go test -test.run=TestPrepareFreezeRoundtrip
	prove ./test-compat.pl
	env RUN_FREEZE=1 go test -test.run=TestFreezeRoundtrip

../../Perl/Decoder/blib:
	cd ../../Perl/Decoder/ ; perl Makefile.PL
	make -C ../../Perl/Decoder

../../Perl/Encoder/blib:
	cd ../../Perl/Encoder/ ; perl Makefile.PL
	make -C ../../Perl/Encoder

test_dir: ../../Perl/Decoder/blib ../../Perl/Encoder/blib test_dir/VERSION_$(CORPUS_PROTO_VER) test_dir/COMPRESS_$(CORPUS_COMPRESS) test_files
	cd ../../Perl/Encoder; perl -Mblib=blib -I t/lib/ -I ../shared/t/lib -MSereal::TestSet -MSereal::Encoder -e '$$Sereal::TestSet::PROTO_VERSION=$(CORPUS_PROTO_VER); $$Sereal::TestSet::COMPRESS=Sereal::Encoder::$(CORPUS_COMPRESS); Sereal::TestSet::write_test_files("../../Go/sereal/test_dir/")'
	touch "test_dir/VERSION_$(CORPUS_PROTO_VER)"
	touch "test_dir/COMPRESS_$(CORPUS_COMPRESS)"

test_files:
	mkdir -p test_dir
	mkdir -p test_freeze
	perl -Mblib=../../Perl/Encoder/blib -MSereal::Encoder cmd/gen/test-decode-struct.pl test_dir

test_dir/VERSION_$(CORPUS_PROTO_VER):
	rm -f test_dir/VERSION_*

test_dir/COMPRESS_$(CORPUS_COMPRESS):
	rm -f test_dir/COMPRESS_*

.PHONY: test_all test compat
@@ -0,0 +1,15 @@
TODO:
- lots of duplicatish code to remove:
    decode cases are similar, too: array/ref, map/ref, array/binary/short-binary
- discard values that don't need to be built
    - profiling shows that we can spend a lot of time setting values in maps that we might throw away
    - if we don't need to keep the value, we should try to not actually create it in memory
- how do we handle hashes?
    : only string keys are supported -- do we call value.String() or panic()?
    : what other coercions (ints <-> strings)?
- roundtrip test (sketched below): perl obj -> perl-sereal stream -> go1 obj -> go-sereal stream -> go2 obj, DeepEqual(go1, go2)
- class names need their own string table (for OBJECTV)
- string table should have hash keys only? (unless dedup strings is true?)
- string vs. byte array handling?
- match 'ptr->array':ARRAYREF and 'ptr->hash':HASHREF
    - go can't do this in one-pass: we don't have ref counts
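The roundtrip item above is easy to sketch on the Go side. The following test is purely illustrative and is not part of this commit; the corpus file name test_dir/roundtrip.srl is hypothetical and stands in for any stream written by the Perl encoder.

package sereal_test

import (
	"io/ioutil"
	"reflect"
	"testing"

	"github.com/Sereal/Sereal/Go/sereal"
)

// TestPerlGoRoundtrip decodes a Perl-produced stream, re-encodes it with the
// Go encoder, decodes the result again, and checks both Go values are equal.
func TestPerlGoRoundtrip(t *testing.T) {
	b, err := ioutil.ReadFile("test_dir/roundtrip.srl") // hypothetical corpus file
	if err != nil {
		t.Skip("no Perl-generated corpus available")
	}

	var go1 interface{}
	if err := sereal.Unmarshal(b, &go1); err != nil {
		t.Fatal(err)
	}

	out, err := sereal.Marshal(go1)
	if err != nil {
		t.Fatal(err)
	}

	var go2 interface{}
	if err := sereal.Unmarshal(out, &go2); err != nil {
		t.Fatal(err)
	}

	if !reflect.DeepEqual(go1, go2) {
		t.Error("roundtrip mismatch")
	}
}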
@@ -0,0 +1,72 @@
package sereal_test

import (
	"testing"

	"github.com/Sereal/Sereal/Go/sereal"
)

var solarSystemMeta = map[string]interface{}{
	"title": "Interesting facts about Solar system",
}

var solarSystem = map[string]interface{}{
	"galaxy": "Milky Way",
	"age":    4568,
	"stars":  []string{"Sun"},
	"planets": []struct {
		pos                int
		name               string
		mass_earths        float64
		notable_satellites []string
	}{
		{1, "Mercury", 0.055, []string{}},
		{2, "Venus", 0.815, []string{}},
		{3, "Earth", 1.0, []string{"Moon"}},
		{4, "Mars", 0.107, []string{"Phobos", "Deimos"}},
		{5, "Jupiter", 317.83, []string{"Io", "Europa", "Ganymede", "Callisto"}},
		{6, "Saturn", 95.16, []string{"Titan", "Rhea", "Enceladus"}},
		{7, "Uranus", 14.536, []string{"Oberon", "Titania", "Miranda", "Ariel", "Umbriel"}},
		{8, "Neptune", 17.15, []string{"Triton"}},
	},
}

func BenchmarkEncodeComplexDataWithHeader(b *testing.B) {
	enc := sereal.NewEncoderV3()

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := enc.MarshalWithHeader(solarSystemMeta, solarSystem)
		if err != nil {
			b.FailNow()
		}
	}
}

func BenchmarkEncodeAndSnappyComplexDataWithHeader(b *testing.B) {
	enc := sereal.NewEncoderV3()
	enc.Compression = sereal.SnappyCompressor{Incremental: true}
	enc.CompressionThreshold = 0

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := enc.MarshalWithHeader(solarSystemMeta, solarSystem)
		if err != nil {
			b.FailNow()
		}
	}
}

func BenchmarkEncodeAndZlibComplexDataWithHeader(b *testing.B) {
	enc := sereal.NewEncoderV3()
	enc.Compression = sereal.ZlibCompressor{Level: sereal.ZlibDefaultCompression}
	enc.CompressionThreshold = 0

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		_, err := enc.MarshalWithHeader(solarSystemMeta, solarSystem)
		if err != nil {
			b.FailNow()
		}
	}
}
@@ -0,0 +1,18 @@
#!/usr/bin/perl

use blib "../../Perl/Decoder/blib/";
use blib "../../Perl/Encoder/blib/";
use lib "../../Perl/shared/t/lib/";

use Sereal::Decoder qw(decode_sereal);
use Data::Dumper;

$Data::Dumper::Indent = 1;
$Data::Dumper::Sortkeys = 1;

$/ = undef;

while(<>) {
    my $o = decode_sereal($_);
    print Dumper($o), "\n";
}
@@ -0,0 +1,115 @@
package main

import (
	"encoding/binary"
	"encoding/hex"
	"flag"
	"io/ioutil"
	"log"
	"os"
	"runtime"

	"github.com/Sereal/Sereal/Go/sereal"
	"github.com/davecgh/go-spew/spew"
	"github.com/dchest/siphash"
	"github.com/dgryski/go-ddmin"
)

func sipuintptr(s []uintptr) uint64 {

	b := make([]byte, len(s)*8)

	for i, v := range s {
		binary.LittleEndian.PutUint64(b[i*8:], uint64(v))
	}

	return siphash.Hash(0, 0, b)
}

func unmarshal(b []byte) (intf map[string]interface{}, crash uint64, err error) {

	defer func() {
		if p := recover(); p != nil {
			var stack [5]uintptr
			runtime.Callers(4, stack[:])
			crash = sipuintptr(stack[:])
			err = p.(error)
		}
	}()

	d := sereal.Decoder{}

	err = d.Unmarshal(b, &intf)
	if err != nil {
		return nil, 0, err
	}

	return intf, 0, nil
}

func process(fname string, b []byte) {

	intf, _, err := unmarshal(b)

	if err != nil {
		switch e := err.(type) {
		case sereal.ErrCorrupt:
			log.Fatalf("error processing %s: %s (%s)", fname, e, e.Err)
		default:
			log.Fatalf("error processing %s: %s", fname, e)
		}
	}

	spew.Dump(intf)
}

func minimize(b []byte, crashWant uint64, errWant error) []byte {
	return ddmin.Minimize(b, func(b []byte) ddmin.Result {
		_, crash, got := unmarshal(b)

		if got == nil {
			return ddmin.Pass
		}

		if crashWant == crash && got.Error() == errWant.Error() {
			return ddmin.Fail
		}

		return ddmin.Unresolved
	})
}

func main() {

	optMinimize := flag.Bool("minimize", false, "minimize test input")

	flag.Parse()

	if *optMinimize {
		b, _ := ioutil.ReadAll(os.Stdin)
		log.Println("data to minimize length", len(b))
		_, crashWant, errWant := unmarshal(b)
		if errWant == nil {
			log.Fatal("no error received while unmarshalling")
		}
		log.Printf("crash=%x errWant=%+v\n", crashWant, errWant)
		m := minimize(b, crashWant, errWant)
		_, crashGot, errGot := unmarshal(m)
		log.Printf("crash=%x errGot=%+v\n", crashGot, errGot)
		log.Println("minimized length", len(m))
		log.Println("\n" + hex.Dump(m))
		os.Stdout.Write(m)
		return
	}

	if flag.NArg() == 0 {
		b, _ := ioutil.ReadAll(os.Stdin)
		process("stdin", b)
		return
	}

	for _, arg := range flag.Args() {
		b, _ := ioutil.ReadFile(arg)
		process(arg, b)
	}
}
@@ -0,0 +1,31 @@
package main

import (
	crand "crypto/rand"
	"encoding/hex"
	"fmt"
	"github.com/Sereal/Sereal/Go/sereal"
	mrand "math/rand"
)

func main() {

	srlHeader, _ := hex.DecodeString("3d73726c0100")

	var decoder sereal.Decoder
	decoder.PerlCompat = true

	for {
		l := len(srlHeader) + mrand.Intn(200)
		b := make([]byte, l)
		crand.Read(b)
		doc := make([]byte, l+len(srlHeader))
		copy(doc, srlHeader)
		copy(doc[6:], b)
		fmt.Println(hex.Dump(doc))
		var m interface{}
		err := decoder.Unmarshal(doc, &m)
		fmt.Println("err=", err)
	}

}
@@ -0,0 +1,35 @@
#!/usr/bin/perl

#use blib "../../Perl/Decoder/blib/";
#use blib "../../Perl/Encoder/blib/";
#use lib "../../Perl/shared/t/lib/";

use Sereal::Encoder qw(encode_sereal);

my $obj1 = {
    ValueStr   => "string as string value which actually should be 32+ characters",
    ValueByte  => "string as binary value",
    ValueInt   => 10,
    ValueSlice => [ 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0 ],
    ValueHash  => {
        key1 => "unique value",
        key2 => "duplicate value",
        key3 => "deplicate value",
    }
};

my $obj2 = {
    ValueStr   => "another string as string value which actually should be 32+ characters",
    ValueByte  => "another string as binary value",
    ValueInt   => -10,
    ValueSlice => [ 18.0, 19.0, 20.0 ],
    ValueHash  => {
        key1 => "unique value",
        key2 => "duplicate value",
        key3 => "deplicate value",
    }
};

open(my $fh, '>', $ARGV[0] . "/test-decode-struct.srl") or die $!;
print $fh encode_sereal([ $obj1, $obj2, $obj1 ], { dedupe_strings => 1 });
exit;
@@ -0,0 +1,68 @@
package sereal

// ProtocolVersion is the maximum protocol version supported by the sereal package.
const ProtocolVersion = 3

// magicHeaderBytes is the magic string for the header. Every packet in protocol
// versions 1 and 2 starts with this.
const magicHeaderBytes = uint32(0x6c72733d) // "=srl"

// magicHeaderBytesHighBit is the new magic string for the header used in protocol
// version 3 and up, with the high bit set as a UTF-8 sanity check. It is an error to
// use the new magic header on a v1 or v2 packet, and it is an error to use the
// old magic header in v3 or later.
const magicHeaderBytesHighBit = uint32(0x6c72f33d) // "=\xF3rl"

// magicHeaderBytesHighBitUTF8 is the v3 magic string for the header after it has
// been corrupted by accidental UTF-8 encoding. It makes it easy to detect when a
// Sereal document has been accidentally UTF-8 encoded, because the \xF3 is
// translated to \xC3\xB3.
const magicHeaderBytesHighBitUTF8 = uint32(0x72b3c33d) // "=\xC3\xB3r"

const headerSize = 5 // 4 magic + 1 version-type

type documentType int

const (
	serealRaw documentType = iota
	serealSnappy
	serealSnappyIncremental
	serealZlib
)

type typeTag byte

const trackFlag = byte(0x80)

const (
	typeVARINT          = 0x20
	typeZIGZAG          = 0x21
	typeFLOAT           = 0x22
	typeDOUBLE          = 0x23
	typeLONG_DOUBLE     = 0x24
	typeUNDEF           = 0x25
	typeBINARY          = 0x26
	typeSTR_UTF8        = 0x27
	typeREFN            = 0x28
	typeREFP            = 0x29
	typeHASH            = 0x2a
	typeARRAY           = 0x2b
	typeOBJECT          = 0x2c
	typeOBJECTV         = 0x2d
	typeALIAS           = 0x2e
	typeCOPY            = 0x2f
	typeWEAKEN          = 0x30
	typeREGEXP          = 0x31
	typeOBJECT_FREEZE   = 0x32
	typeOBJECTV_FREEZE  = 0x33
	typeCANONICAL_UNDEF = 0x39
	typeFALSE           = 0x3a
	typeTRUE            = 0x3b
	typeMANY            = 0x3c
	typePACKET_START    = 0x3d
	typeEXTEND          = 0x3e
	typePAD             = 0x3f
	typeARRAYREF_0      = 0x40
	typeHASHREF_0       = 0x50
	typeSHORT_BINARY_0  = 0x60
)
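For orientation: headerSize describes a five-byte document header, four magic bytes followed by a version-type byte whose low nibble carries the protocol version and whose high nibble carries the documentType (this is how encode.go later fills encHeader[4]). A small self-contained sketch of packing and unpacking that byte; the helper names are illustrative only, not library API.

package main

import "fmt"

type documentType int

const (
	serealRaw documentType = iota
	serealSnappy
	serealSnappyIncremental
	serealZlib
)

// versionType packs the protocol version (low nibble) and document type
// (high nibble) into the fifth header byte, mirroring encHeader[4] in encode.go.
func versionType(version int, doctype documentType) byte {
	return byte(version) | byte(doctype)<<4
}

// splitVersionType is the reverse operation, as a decoder would perform it.
func splitVersionType(b byte) (int, documentType) {
	return int(b & 0x0f), documentType(b >> 4)
}

func main() {
	b := versionType(3, serealZlib)
	v, dt := splitVersionType(b)
	fmt.Printf("byte=0x%02x version=%d doctype=%d\n", b, v, dt) // byte=0x33 version=3 doctype=3
}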
File diff suppressed because it is too large
@@ -0,0 +1,12 @@
/*
Package sereal implements the Sereal serialization format

It follows the standard Go Marshal/Unmarshal interface.

For more information on Sereal, please see
http://blog.booking.com/sereal-a-binary-data-serialization-format.html
and
http://github.com/Sereal/Sereal

*/
package sereal
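Since the package follows the standard Marshal/Unmarshal convention, basic use looks like the sketch below. This is an illustrative program, not part of this commit; it only relies on the package-level Marshal and Unmarshal shown elsewhere in this change.

package main

import (
	"fmt"
	"log"

	"github.com/Sereal/Sereal/Go/sereal"
)

func main() {
	in := map[string]interface{}{"name": "ping", "count": 3}

	// Encode with the package-level Marshal (defaults to a v3 encoder).
	b, err := sereal.Marshal(in)
	if err != nil {
		log.Fatal(err)
	}

	// Decode back into an empty interface, as the fuzzer below does.
	var out interface{}
	if err := sereal.Unmarshal(b, &out); err != nil {
		log.Fatal(err)
	}

	fmt.Printf("%+v\n", out)
}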
@@ -0,0 +1,624 @@
package sereal

import (
	"encoding"
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"reflect"
	"runtime"
	"unsafe"
)

// An Encoder encodes Go data structures into Sereal byte streams
type Encoder struct {
	PerlCompat           bool       // try to mimic Perl's structure as much as possible
	Compression          compressor // optionally compress the main payload of the document using SnappyCompressor or ZlibCompressor
	CompressionThreshold int        // threshold in bytes above which compression is attempted: 1024 bytes by default
	DisableDedup         bool       // should we disable deduping of class names and hash keys
	DisableFREEZE        bool       // should we disable the FREEZE tag, which calls MarshalBinary
	ExpectedSize         uint       // give a hint to encoder about expected size of encoded data
	version              int        // default version to encode
	tcache               tagsCache
}

type compressor interface {
	compress(b []byte) ([]byte, error)
}

// NewEncoder returns a new Encoder struct with default values
func NewEncoder() *Encoder {
	return &Encoder{
		PerlCompat:           false,
		CompressionThreshold: 1024,
		version:              1,
	}
}

// NewEncoderV2 returns a new Encoder that encodes version 2
func NewEncoderV2() *Encoder {
	return &Encoder{
		PerlCompat:           false,
		CompressionThreshold: 1024,
		version:              2,
	}
}

// NewEncoderV3 returns a new Encoder that encodes version 3
func NewEncoderV3() *Encoder {
	return &Encoder{
		PerlCompat:           false,
		CompressionThreshold: 1024,
		version:              3,
	}
}

var defaultEncoder = NewEncoderV3()

// Marshal encodes body with the default encoder
func Marshal(body interface{}) ([]byte, error) {
	return defaultEncoder.MarshalWithHeader(nil, body)
}

// Marshal returns the Sereal encoding of body
func (e *Encoder) Marshal(body interface{}) (b []byte, err error) {
	return e.MarshalWithHeader(nil, body)
}

// MarshalWithHeader returns the Sereal encoding of body with header data
func (e *Encoder) MarshalWithHeader(header interface{}, body interface{}) (b []byte, err error) {
	defer func() {
		//return
		if r := recover(); r != nil {
			if _, ok := r.(runtime.Error); ok {
				panic(r)
			}

			if s, ok := r.(string); ok {
				err = errors.New(s)
			} else {
				err = r.(error)
			}
		}
	}()

	// uninitialized encoder? set to the most recent supported protocol version
	if e.version == 0 {
		e.version = ProtocolVersion
	}

	encHeader := make([]byte, headerSize, 32)

	if e.version < 3 {
		binary.LittleEndian.PutUint32(encHeader[:4], magicHeaderBytes)
	} else {
		binary.LittleEndian.PutUint32(encHeader[:4], magicHeaderBytesHighBit)
	}

	// Set the <version-type> component in the header
	encHeader[4] = byte(e.version) | byte(serealRaw)<<4

	if header != nil && e.version >= 2 {
		strTable := make(map[string]int)
		ptrTable := make(map[uintptr]int)
		// this is both the flag byte (== "there is user data") and also a hack to make 1-based offsets work
		henv := []byte{0x01} // flag byte == "there is user data"
		encHeaderSuffix, err := e.encode(henv, header, false, false, strTable, ptrTable)

		if err != nil {
			return nil, err
		}

		encHeader = varint(encHeader, uint(len(encHeaderSuffix)))
		encHeader = append(encHeader, encHeaderSuffix...)
	} else {
		/* header size */
		encHeader = append(encHeader, 0)
	}

	strTable := make(map[string]int)
	ptrTable := make(map[uintptr]int)

	encBody := make([]byte, 0, e.ExpectedSize)

	switch e.version {
	case 1:
		encBody, err = e.encode(encBody, body, false, false, strTable, ptrTable)
	case 2, 3:
		encBody = append(encBody, 0) // hack for 1-based offsets
		encBody, err = e.encode(encBody, body, false, false, strTable, ptrTable)
		encBody = encBody[1:] // trim hacky first byte
	}

	if err != nil {
		return nil, err
	}

	if e.Compression != nil && (e.CompressionThreshold == 0 || len(encBody) >= e.CompressionThreshold) {
		encBody, err = e.Compression.compress(encBody)
		if err != nil {
			return nil, err
		}

		var doctype documentType

		switch c := e.Compression.(type) {
		case SnappyCompressor:
			if e.version > 1 && !c.Incremental {
				return nil, errors.New("non-incremental snappy compression only valid for v1 documents")
			}
			if e.version == 1 {
				doctype = serealSnappy
			} else {
				doctype = serealSnappyIncremental
			}
		case ZlibCompressor:
			if e.version < 3 {
				return nil, errors.New("zlib compression only valid for v3 documents and up")
			}
			doctype = serealZlib
		default:
			// Defensive programming: this point should never be
			// reached in production code because the compressor
			// interface is not exported, hence no way to pass in
			// an unknown thing. But it may happen during
			// development when a new compressor is implemented,
			// but a relevant document type is not defined.
			panic("undefined compression")
		}

		encHeader[4] |= byte(doctype) << 4
	}

	return append(encHeader, encBody...), nil
}

/*************************************
 * Encode via static types - fast path
 *************************************/
func (e *Encoder) encode(b []byte, v interface{}, isKeyOrClass bool, isRefNext bool, strTable map[string]int, ptrTable map[uintptr]int) ([]byte, error) {
	var err error

	switch value := v.(type) {
	case nil:
		b = append(b, typeUNDEF)

	case bool:
		if value {
			b = append(b, typeTRUE)
		} else {
			b = append(b, typeFALSE)
		}

	case int:
		b = e.encodeInt(b, reflect.Int, int64(value))
	case int8:
		b = e.encodeInt(b, reflect.Int, int64(value))
	case int16:
		b = e.encodeInt(b, reflect.Int, int64(value))
	case int32:
		b = e.encodeInt(b, reflect.Int, int64(value))
	case int64:
		b = e.encodeInt(b, reflect.Int, int64(value))

	case uint:
		b = e.encodeInt(b, reflect.Uint, int64(value))
	case uint8:
		b = e.encodeInt(b, reflect.Uint, int64(value))
	case uint16:
		b = e.encodeInt(b, reflect.Uint, int64(value))
	case uint32:
		b = e.encodeInt(b, reflect.Uint, int64(value))
	case uint64:
		b = e.encodeInt(b, reflect.Uint, int64(value))

	case float32:
		b = e.encodeFloat(b, value)
	case float64:
		b = e.encodeDouble(b, value)

	case string:
		b = e.encodeString(b, value, isKeyOrClass, strTable)

	case []uint8:
		b = e.encodeBytes(b, value, isKeyOrClass, strTable)

	case []interface{}:
		b, err = e.encodeIntfArray(b, value, isRefNext, strTable, ptrTable)

	case map[string]interface{}:
		b, err = e.encodeStrMap(b, value, isRefNext, strTable, ptrTable)

	case reflect.Value:
		if value.Kind() == reflect.Invalid {
			b = append(b, typeUNDEF)
		} else {
			// could be optimized to tail call
			b, err = e.encode(b, value.Interface(), false, isRefNext, strTable, ptrTable)
		}

	case PerlUndef:
		if value.canonical {
			b = append(b, typeCANONICAL_UNDEF)
		} else {
			b = append(b, typeUNDEF)
		}

	case PerlObject:
		b = append(b, typeOBJECT)
		b = e.encodeBytes(b, []byte(value.Class), true, strTable)
		b, err = e.encode(b, value.Reference, false, false, strTable, ptrTable)

	case PerlRegexp:
		b = append(b, typeREGEXP)
		b = e.encodeBytes(b, value.Pattern, false, strTable)
		b = e.encodeBytes(b, value.Modifiers, false, strTable)

	case PerlWeakRef:
		b = append(b, typeWEAKEN)
		b, err = e.encode(b, value.Reference, false, false, strTable, ptrTable)

	//case *interface{}:
	//TODO handle here if easy

	//case interface{}:
	// http://blog.golang.org/laws-of-reflection
	// One important detail is that the pair inside an interface always has the form (value, concrete type)
	// and cannot have the form (value, interface type). Interfaces do not hold interface values.
	//panic("interface cannot hold an interface")

	// ikruglov
	// in theory this block should not be commented out,
	// but in practice the type *interface{} somehow manages to match interface{}
	// if one manages to properly implement the *interface{} case, this block should be uncommented

	default:
		b, err = e.encodeViaReflection(b, reflect.ValueOf(value), isKeyOrClass, isRefNext, strTable, ptrTable)
	}

	return b, err
}

func (e *Encoder) encodeInt(by []byte, k reflect.Kind, i int64) []byte {
	switch {
	case 0 <= i && i <= 15:
		by = append(by, byte(i)&0x0f)
	case -16 <= i && i < 0 && k == reflect.Int:
		by = append(by, 0x010|(byte(i)&0x0f))
	case i > 15:
		by = append(by, typeVARINT)
		by = varint(by, uint(i))
	case i < 0:
		n := uint(i)
		if k == reflect.Int {
			by = append(by, typeZIGZAG)
			n = uint((i << 1) ^ (i >> 63))
		} else {
			by = append(by, typeVARINT)
		}

		by = varint(by, uint(n))
	}

	return by
}

func (e *Encoder) encodeFloat(by []byte, f float32) []byte {
	u := math.Float32bits(f)
	by = append(by, typeFLOAT)
	by = append(by, byte(u))
	by = append(by, byte(u>>8))
	by = append(by, byte(u>>16))
	by = append(by, byte(u>>24))
	return by
}

func (e *Encoder) encodeDouble(by []byte, f float64) []byte {
	u := math.Float64bits(f)
	by = append(by, typeDOUBLE)
	by = append(by, byte(u))
	by = append(by, byte(u>>8))
	by = append(by, byte(u>>16))
	by = append(by, byte(u>>24))
	by = append(by, byte(u>>32))
	by = append(by, byte(u>>40))
	by = append(by, byte(u>>48))
	by = append(by, byte(u>>56))
	return by
}

func (e *Encoder) encodeString(by []byte, s string, isKeyOrClass bool, strTable map[string]int) []byte {
	if !e.DisableDedup && isKeyOrClass {
		if copyOffs, ok := strTable[s]; ok {
			by = append(by, typeCOPY)
			by = varint(by, uint(copyOffs))
			return by
		}
		strTable[s] = len(by)
	}

	by = append(by, typeSTR_UTF8)
	by = varint(by, uint(len(s)))
	return append(by, s...)
}

func (e *Encoder) encodeBytes(by []byte, byt []byte, isKeyOrClass bool, strTable map[string]int) []byte {
	if !e.DisableDedup && isKeyOrClass {
		if copyOffs, ok := strTable[string(byt)]; ok {
			by = append(by, typeCOPY)
			by = varint(by, uint(copyOffs))
			return by
		}
		// save for later
		strTable[string(byt)] = len(by)
	}

	if l := len(byt); l < 32 {
		by = append(by, typeSHORT_BINARY_0+byte(l))
	} else {
		by = append(by, typeBINARY)
		by = varint(by, uint(l))
	}

	return append(by, byt...)
}

func (e *Encoder) encodeIntfArray(by []byte, arr []interface{}, isRefNext bool, strTable map[string]int, ptrTable map[uintptr]int) ([]byte, error) {
	if e.PerlCompat && !isRefNext {
		by = append(by, typeREFN)
	}

	// TODO implement ARRAYREF for small arrays

	l := len(arr)
	by = append(by, typeARRAY)
	by = varint(by, uint(l))

	var err error
	for i := 0; i < l; i++ {
		if by, err = e.encode(by, arr[i], false, false, strTable, ptrTable); err != nil {
			return nil, err
		}
	}

	return by, nil
}

func (e *Encoder) encodeStrMap(by []byte, m map[string]interface{}, isRefNext bool, strTable map[string]int, ptrTable map[uintptr]int) ([]byte, error) {
	if e.PerlCompat && !isRefNext {
		by = append(by, typeREFN)
	}

	// TODO implement HASHREF for small maps

	by = append(by, typeHASH)
	by = varint(by, uint(len(m)))

	var err error
	for k, v := range m {
		by = e.encodeString(by, k, true, strTable)
		if by, err = e.encode(by, v, false, false, strTable, ptrTable); err != nil {
			return by, err
		}
	}

	return by, nil
}

/*************************************
 * Encode via reflection
 *************************************/
func (e *Encoder) encodeViaReflection(b []byte, rv reflect.Value, isKeyOrClass bool, isRefNext bool, strTable map[string]int, ptrTable map[uintptr]int) ([]byte, error) {
	if !e.DisableFREEZE && rv.Kind() != reflect.Invalid && rv.Kind() != reflect.Ptr {
		if m, ok := rv.Interface().(encoding.BinaryMarshaler); ok {
			by, err := m.MarshalBinary()
			if err != nil {
				return nil, err
			}

			b = append(b, typeOBJECT_FREEZE)
			b = e.encodeString(b, concreteName(rv), true, strTable)
			b = append(b, typeREFN)
			b = append(b, typeARRAY)
			b = varint(b, uint(1))
			return e.encode(b, reflect.ValueOf(by), false, false, strTable, ptrTable)
		}
	}

	// make sure we're looking at a real type and not an interface
	for rv.Kind() == reflect.Interface {
		rv = rv.Elem()
	}

	var err error
	switch rk := rv.Kind(); rk {
	case reflect.Slice:
		// uint8 case is handled in encode()
		fallthrough

	case reflect.Array:
		b, err = e.encodeArray(b, rv, isRefNext, strTable, ptrTable)

	case reflect.Map:
		b, err = e.encodeMap(b, rv, isRefNext, strTable, ptrTable)

	case reflect.Struct:
		b, err = e.encodeStruct(b, rv, strTable, ptrTable)

	case reflect.Ptr:
		b, err = e.encodePointer(b, rv, strTable, ptrTable)

	default:
		panic(fmt.Sprintf("no support for type '%s' (%s)", rk.String(), rv.Type()))
	}

	return b, err
}

func (e *Encoder) encodeArray(by []byte, arr reflect.Value, isRefNext bool, strTable map[string]int, ptrTable map[uintptr]int) ([]byte, error) {
	if e.PerlCompat && !isRefNext {
		by = append(by, typeREFN)
	}

	l := arr.Len()
	by = append(by, typeARRAY)
	by = varint(by, uint(l))

	var err error
	for i := 0; i < l; i++ {
		if by, err = e.encode(by, arr.Index(i), false, false, strTable, ptrTable); err != nil {
			return nil, err
		}
	}

	return by, nil
}

func (e *Encoder) encodeMap(by []byte, m reflect.Value, isRefNext bool, strTable map[string]int, ptrTable map[uintptr]int) ([]byte, error) {
	if e.PerlCompat && !isRefNext {
		by = append(by, typeREFN)
	}

	keys := m.MapKeys()
	by = append(by, typeHASH)
	by = varint(by, uint(len(keys)))

	if e.PerlCompat {
		var err error
		for _, k := range keys {
			by = e.encodeString(by, k.String(), true, strTable)
			if by, err = e.encode(by, m.MapIndex(k), false, false, strTable, ptrTable); err != nil {
				return by, err
			}
		}
	} else {
		var err error
		for _, k := range keys {
			if by, err = e.encode(by, k, true, false, strTable, ptrTable); err != nil {
				return by, err
			}

			if by, err = e.encode(by, m.MapIndex(k), false, false, strTable, ptrTable); err != nil {
				return by, err
			}
		}
	}

	return by, nil
}

func (e *Encoder) encodeStruct(by []byte, st reflect.Value, strTable map[string]int, ptrTable map[uintptr]int) ([]byte, error) {
	tags := e.tcache.Get(st)

	by = append(by, typeOBJECT)
	by = e.encodeBytes(by, []byte(st.Type().Name()), true, strTable)

	if e.PerlCompat {
		// must be a reference
		by = append(by, typeREFN)
	}

	by = append(by, typeHASH)
	by = varint(by, uint(len(tags)))

	var err error
	for f, i := range tags {
		by = e.encodeString(by, f, true, strTable)
		if by, err = e.encode(by, st.Field(i), false, false, strTable, ptrTable); err != nil {
			return nil, err
		}
	}

	return by, nil
}

func (e *Encoder) encodePointer(by []byte, rv reflect.Value, strTable map[string]int, ptrTable map[uintptr]int) ([]byte, error) {
	// ikruglov
	// I don't fully understand this logic, so leave it as is :-)

	if rv.Elem().Kind() == reflect.Struct {
		switch rv.Elem().Interface().(type) {
		case PerlRegexp:
			return e.encode(by, rv.Elem(), false, false, strTable, ptrTable)
		case PerlUndef:
			return e.encode(by, rv.Elem(), false, false, strTable, ptrTable)
		case PerlObject:
			return e.encode(by, rv.Elem(), false, false, strTable, ptrTable)
		case PerlWeakRef:
			return e.encode(by, rv.Elem(), false, false, strTable, ptrTable)
		}
	}

	rvptr := rv.Pointer()
	rvptr2 := getPointer(rv.Elem())

	offs, ok := ptrTable[rvptr]

	if !ok && rvptr2 != 0 {
		offs, ok = ptrTable[rvptr2]
		if ok {
			rvptr = rvptr2
		}
	}

	if ok { // seen this before
		by = append(by, typeREFP)
		by = varint(by, uint(offs))
		by[offs] |= trackFlag // original offset now tracked
	} else {
		lenbOrig := len(by)

		by = append(by, typeREFN)

		if rvptr != 0 {
			ptrTable[rvptr] = lenbOrig
		}

		var err error
		by, err = e.encode(by, rv.Elem(), false, true, strTable, ptrTable)
		if err != nil {
			return nil, err
		}

		if rvptr2 != 0 {
			// The thing this points to starts one after the current pointer
			ptrTable[rvptr2] = lenbOrig + 1
		}
	}

	return by, nil
}

func varint(by []byte, n uint) []uint8 {
	for n >= 0x80 {
		b := byte(n) | 0x80
		by = append(by, b)
		n >>= 7
	}

	return append(by, byte(n))
}

func getPointer(rv reflect.Value) uintptr {
	var rvptr uintptr

	switch rv.Kind() {
	case reflect.Map, reflect.Slice:
		rvptr = rv.Pointer()
	case reflect.Interface:
		// FIXME: still needed?
		return getPointer(rv.Elem())
	case reflect.Ptr:
		rvptr = rv.Pointer()
	case reflect.String:
		ps := (*reflect.StringHeader)(unsafe.Pointer(rv.UnsafeAddr()))
		rvptr = ps.Data
	}

	return rvptr
}

func concreteName(value reflect.Value) string {
	return value.Type().PkgPath() + "." + value.Type().Name()
}
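The varint() helper at the end of encode.go writes the format's little-endian base-128 integers; the matching reader lives in decode.go, whose diff is suppressed above. Purely as an illustration of the encoding, a minimal stand-alone decoder consistent with varint() could look like the following (varintdecodeSketch is a made-up name for this sketch, not the library's actual function):

package main

import (
	"errors"
	"fmt"
)

// varintdecodeSketch reads one unsigned base-128 varint from b and returns the
// decoded value and the number of bytes consumed. It is the inverse of the
// varint() encoder above.
func varintdecodeSketch(b []byte) (n int, sz int, err error) {
	var shift uint
	for i, v := range b {
		n |= int(v&0x7f) << shift
		if v&0x80 == 0 {
			return n, i + 1, nil
		}
		shift += 7
		if shift > 63 {
			return 0, 0, errors.New("varint too long")
		}
	}
	return 0, 0, errors.New("truncated varint")
}

func main() {
	// 300 encodes as 0xAC 0x02 in this scheme.
	n, sz, err := varintdecodeSketch([]byte{0xac, 0x02})
	fmt.Println(n, sz, err) // 300 2 <nil>
}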
@@ -0,0 +1,41 @@
package sereal

import "errors"

// Errors
var (
	ErrBadHeaderUTF8 = errors.New("bad header: it seems your document was accidentally UTF-8 encoded")
	ErrBadHeader     = errors.New("bad header: not a valid Sereal document")
	ErrBadSnappy     = errors.New("snappy compression only valid for v1 documents")
	ErrBadZlibV3     = errors.New("zlib compression only valid for v3 documents and up")

	ErrHeaderPointer = errors.New("expected pointer for header")
	ErrBodyPointer   = errors.New("expected pointer for body")

	ErrTruncated  = errors.New("truncated document")
	ErrUnknownTag = errors.New("unknown tag byte")

	ErrTooLarge = errors.New("sereal: document too large to be compressed with snappy")
)

// ErrCorrupt is returned if the sereal document was corrupt
type ErrCorrupt struct{ Err string }

// internal constants used for corrupt
var (
	errBadSliceSize         = "bad size for slice"
	errBadStringSize        = "bad size for string"
	errBadOffset            = "bad offset"
	errUntrackedOffsetREFP  = "untracked offset for REFP"
	errBadHashSize          = "bad size for hash"
	errStringish            = "expected stringish for classname"
	errUntrackedOffsetAlias = "untracked offset for alias"
	errNestedCOPY           = "bad nested copy tag"
	errBadVarint            = "bad varint"
	errFreezeNotRefnArray   = "OBJECT_FREEZE value not REFN+ARRAY"
	errFreezeNotArray       = "OBJECT_FREEZE value not an array"
	errFreezeMultipleElts   = "OBJECT_FREEZE array contains multiple elements"
	errFreezeNotByteSlice   = "OBJECT_FREEZE array not []byte"
)

func (c ErrCorrupt) Error() string { return "sereal: corrupt document" }
@@ -0,0 +1,39 @@
// +build gofuzz

package sereal

func Fuzz(data []byte) int {
	var m interface{}

	header, err := readHeader(data)
	if err != nil {
		return 0
	}

	bodyStart := headerSize + header.suffixSize

	if bodyStart > len(data) || bodyStart < 0 {
		return 0
	}

	switch header.version {
	case 1, 2, 3:
		break
	default:
		return 0
	}

	switch header.doctype {
	case serealRaw:
		break
	case serealSnappy, serealSnappyIncremental, serealZlib:
		// ignore compressed data
		return 0
	}

	if err := Unmarshal(data, &m); err != nil {
		return 0
	}

	return 1
}
@ -0,0 +1,692 @@
|
|||
package sereal
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
type topLevelElementType int
|
||||
|
||||
// Top-level data structures for merged documents
|
||||
const (
|
||||
TopLevelArray topLevelElementType = iota
|
||||
TopLevelArrayRef
|
||||
// TopLevelHash
|
||||
// TopLevelHashRef
|
||||
)
|
||||
|
||||
const hashKeysValuesFlag = uint32(1 << 31)
|
||||
|
||||
// Merger merges multiple sereal documents without reconstructing them
|
||||
type Merger struct {
|
||||
buf []byte
|
||||
strTable map[string]int
|
||||
objTable map[string]int
|
||||
version int
|
||||
length int
|
||||
lenOffset int
|
||||
bodyOffset int // 1-based
|
||||
|
||||
// public arguments
|
||||
|
||||
// TopLevelElement allows a user to choose what container will be used
|
||||
// at top level. Available options: array, arrayref, hash, hashref
|
||||
TopLevelElement topLevelElementType
|
||||
|
||||
// optionally compress the main payload of the document using SnappyCompressor or ZlibCompressor
|
||||
// CompressionThreshold specifies threshold in bytes above which compression is attempted: 1024 bytes by default
|
||||
Compression compressor
|
||||
CompressionThreshold int
|
||||
|
||||
// If enabled, merger will deduplicate all strings it meets.
|
||||
// Otherwise, only hash key and class names will be deduplicated
|
||||
DedupeStrings bool
|
||||
|
||||
// If enabled, KeepFlat keeps flat structture of final document.
|
||||
// Specifically, consider two arrays [A,B,C] and [D,E,F]:
|
||||
// - when KeepFlat == false, the result of merging is [[A,B,C],[D,E,F]]
|
||||
// - when KeepFlat == true, the result is [A,B,C,D,E,F]
|
||||
// This mode is relevant only to top level elements
|
||||
KeepFlat bool
|
||||
|
||||
// give a hint to encoder about expected size of encoded data
|
||||
ExpectedSize uint
|
||||
|
||||
// moved bool fields here to make struct smaller
|
||||
inited bool
|
||||
finished bool
|
||||
}
|
||||
|
||||
type mergerDoc struct {
|
||||
buf []byte
|
||||
trackIdxs []int
|
||||
trackTable map[int]int
|
||||
version int
|
||||
startIdx int // 0-based
|
||||
bodyOffset int // 1-based
|
||||
}
|
||||
|
||||
// NewMerger returns a merger using the latest sereal version
|
||||
func NewMerger() *Merger {
|
||||
return &Merger{
|
||||
TopLevelElement: TopLevelArrayRef,
|
||||
CompressionThreshold: 1024,
|
||||
}
|
||||
}
|
||||
|
||||
// NewMergerV2 returns a merger for processing sereal v2 documents
|
||||
func NewMergerV2() *Merger {
|
||||
return &Merger{
|
||||
version: 2,
|
||||
TopLevelElement: TopLevelArrayRef,
|
||||
CompressionThreshold: 1024,
|
||||
}
|
||||
}
|
||||
|
||||
// NewMergerV3 returns a merger for processing sereal v3 documents
|
||||
func NewMergerV3() *Merger {
|
||||
return &Merger{
|
||||
version: 3,
|
||||
TopLevelElement: TopLevelArrayRef,
|
||||
CompressionThreshold: 1024,
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Merger) initMerger() error {
|
||||
if m.inited {
|
||||
return nil
|
||||
}
|
||||
|
||||
// initialize internal fields
|
||||
m.strTable = make(map[string]int)
|
||||
m.objTable = make(map[string]int)
|
||||
|
||||
if m.ExpectedSize > 0 {
|
||||
m.buf = make([]byte, headerSize, m.ExpectedSize)
|
||||
} else {
|
||||
m.buf = make([]byte, headerSize)
|
||||
}
|
||||
|
||||
if m.version == 0 {
|
||||
m.version = ProtocolVersion
|
||||
}
|
||||
|
||||
switch {
|
||||
case m.version > ProtocolVersion:
|
||||
return fmt.Errorf("protocol version '%v' not yet supported", m.version)
|
||||
case m.version < 3:
|
||||
binary.LittleEndian.PutUint32(m.buf[:4], magicHeaderBytes)
|
||||
default:
|
||||
binary.LittleEndian.PutUint32(m.buf[:4], magicHeaderBytesHighBit)
|
||||
}
|
||||
|
||||
m.buf[4] = byte(m.version) // fill version
|
||||
m.buf = append(m.buf, 0) // no header
|
||||
m.bodyOffset = len(m.buf) - 1 // remember body offset
|
||||
|
||||
// append top level tags
|
||||
switch m.TopLevelElement {
|
||||
case TopLevelArray:
|
||||
m.buf = append(m.buf, typeARRAY)
|
||||
case TopLevelArrayRef:
|
||||
m.buf = append(m.buf, typeREFN, typeARRAY)
|
||||
default:
|
||||
return errors.New("invalid TopLevelElement")
|
||||
}
|
||||
|
||||
// remember len offset + pad bytes for length
|
||||
m.lenOffset = len(m.buf)
|
||||
for i := 0; i < binary.MaxVarintLen32; i++ {
|
||||
m.buf = append(m.buf, typePAD)
|
||||
}
|
||||
|
||||
m.inited = true
|
||||
return nil
|
||||
}
|
||||
|
||||
// Append adds the sereal document b and returns the number of elements added to the top-level structure
|
||||
func (m *Merger) Append(b []byte) (int, error) {
|
||||
if err := m.initMerger(); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
if m.finished {
|
||||
return 0, errors.New("finished document")
|
||||
}
|
||||
|
||||
docHeader, err := readHeader(b)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
doc := mergerDoc{
|
||||
buf: b[headerSize+docHeader.suffixSize:],
|
||||
version: int(docHeader.version),
|
||||
startIdx: 0,
|
||||
bodyOffset: -1, // 1-based offsets
|
||||
}
|
||||
|
||||
var decomp decompressor
|
||||
switch docHeader.doctype {
|
||||
case serealRaw:
|
||||
// nothing
|
||||
|
||||
case serealSnappy:
|
||||
if doc.version != 1 {
|
||||
return 0, errors.New("snappy compression only valid for v1 documents")
|
||||
}
|
||||
|
||||
decomp = SnappyCompressor{Incremental: false}
|
||||
|
||||
case serealSnappyIncremental:
|
||||
decomp = SnappyCompressor{Incremental: true}
|
||||
|
||||
case serealZlib:
|
||||
if doc.version < 3 {
|
||||
return 0, errors.New("zlib compression only valid for v3 documents and up")
|
||||
}
|
||||
|
||||
decomp = ZlibCompressor{}
|
||||
|
||||
default:
|
||||
return 0, fmt.Errorf("document type '%d' not yet supported", docHeader.doctype)
|
||||
}
|
||||
|
||||
if decomp != nil {
|
||||
if doc.buf, err = decomp.decompress(doc.buf); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
|
||||
oldLength := m.length
|
||||
lastElementOffset := len(m.buf)
|
||||
|
||||
// first pass: build table of tracked tags
|
||||
if err := m.buildTrackTable(&doc); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// preallocate memory
|
||||
// copying data from doc.buf might seem to be unefficient,
|
||||
// but profiling/benchmarking shows that there is no
|
||||
// difference between growing slice by append() or via new() + copy()
|
||||
m.buf = append(m.buf, doc.buf...)
|
||||
m.buf = m.buf[:lastElementOffset]
|
||||
|
||||
// second pass: do the work
|
||||
if err := m.mergeItems(&doc); err != nil {
|
||||
m.buf = m.buf[0:lastElementOffset] // remove appended stuff
|
||||
return 0, err
|
||||
}
|
||||
|
||||
return m.length - oldLength, nil
|
||||
}
|
||||
|
||||
// Finish is called to terminate the merging process
|
||||
func (m *Merger) Finish() ([]byte, error) {
|
||||
if err := m.initMerger(); err != nil {
|
||||
return m.buf, err
|
||||
}
|
||||
|
||||
if !m.finished {
|
||||
m.finished = true
|
||||
binary.PutUvarint(m.buf[m.lenOffset:], uint64(m.length))
|
||||
|
||||
if m.Compression != nil && (m.CompressionThreshold == 0 || len(m.buf) >= m.CompressionThreshold) {
|
||||
compressed, err := m.Compression.compress(m.buf[m.bodyOffset+1:])
|
||||
if err != nil {
|
||||
return m.buf, err
|
||||
}
|
||||
|
||||
// TODO think about some optimizations here
|
||||
copy(m.buf[m.bodyOffset+1:], compressed)
|
||||
m.buf = m.buf[:len(compressed)+m.bodyOffset+1]
|
||||
|
||||
// verify compressor, there was little point in veryfing compressor in initMerger()
|
||||
// because use can change it meanwhile
|
||||
switch comp := m.Compression.(type) {
|
||||
case SnappyCompressor:
|
||||
if !comp.Incremental {
|
||||
return nil, errors.New("non-incremental snappy compression is not supported")
|
||||
}
|
||||
|
||||
m.buf[4] |= byte(serealSnappyIncremental) << 4
|
||||
|
||||
case ZlibCompressor:
|
||||
if m.version < 3 {
|
||||
return nil, errors.New("zlib compression only valid for v3 documents and up")
|
||||
}
|
||||
|
||||
m.buf[4] |= byte(serealZlib) << 4
|
||||
|
||||
default:
|
||||
return nil, errors.New("unknown compressor")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return m.buf, nil
|
||||
}
|
||||
|
||||
func (m *Merger) buildTrackTable(doc *mergerDoc) error {
|
||||
buf := doc.buf
|
||||
idx := doc.startIdx
|
||||
if idx < 0 || idx > len(buf) {
|
||||
return errors.New("invalid index")
|
||||
}
|
||||
|
||||
doc.trackTable = make(map[int]int)
|
||||
doc.trackIdxs = make([]int, 0)
|
||||
|
||||
for idx < len(buf) {
|
||||
tag := buf[idx]
|
||||
|
||||
if (tag & trackFlag) == trackFlag {
|
||||
doc.trackTable[idx-doc.bodyOffset] = -1
|
||||
tag &^= trackFlag
|
||||
}
|
||||
|
||||
//fmt.Printf("%x (%x) at %d (%d)\n", tag, buf[idx], idx, idx - doc.bodyOffset)
|
||||
|
||||
switch {
|
||||
case tag < typeVARINT,
|
||||
tag == typePAD, tag == typeREFN, tag == typeWEAKEN,
|
||||
tag == typeUNDEF, tag == typeCANONICAL_UNDEF,
|
||||
tag == typeTRUE, tag == typeFALSE, tag == typeEXTEND,
|
||||
tag == typeREGEXP, tag == typeOBJECT, tag == typeOBJECT_FREEZE:
|
||||
idx++
|
||||
|
||||
case tag == typeVARINT, tag == typeZIGZAG:
|
||||
_, sz, err := varintdecode(buf[idx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idx += sz + 1
|
||||
|
||||
case tag == typeFLOAT:
|
||||
idx += 5 // 4 bytes + tag
|
||||
|
||||
case tag == typeDOUBLE:
|
||||
idx += 9 // 8 bytes + tag
|
||||
|
||||
case tag == typeLONG_DOUBLE:
|
||||
idx += 17 // 16 bytes + tag
|
||||
|
||||
case tag == typeBINARY, tag == typeSTR_UTF8:
|
||||
ln, sz, err := varintdecode(buf[idx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idx += sz + ln + 1
|
||||
|
||||
if ln < 0 || ln > math.MaxUint32 {
|
||||
return fmt.Errorf("bad size for string: %d", ln)
|
||||
} else if idx > len(buf) {
|
||||
return fmt.Errorf("truncated document, expect %d bytes", len(buf)-idx)
|
||||
}
|
||||
|
||||
case tag == typeARRAY, tag == typeHASH:
|
||||
_, sz, err := varintdecode(buf[idx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
idx += sz + 1
|
||||
|
||||
case tag == typeCOPY, tag == typeALIAS, tag == typeREFP,
|
||||
tag == typeOBJECTV, tag == typeOBJECTV_FREEZE:
|
||||
|
||||
offset, sz, err := varintdecode(buf[idx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if offset < 0 || offset >= idx {
|
||||
return fmt.Errorf("tag %d refers to invalid offset: %d", tag, offset)
|
||||
}
|
||||
|
||||
doc.trackTable[offset] = -1
|
||||
idx += sz + 1
|
||||
|
||||
case tag >= typeARRAYREF_0 && tag < typeARRAYREF_0+16:
|
||||
idx++
|
||||
|
||||
case tag >= typeHASHREF_0 && tag < typeHASHREF_0+16:
|
||||
idx++
|
||||
|
||||
case tag >= typeSHORT_BINARY_0 && tag < typeSHORT_BINARY_0+32:
|
||||
idx += 1 + int(tag&0x1F)
|
||||
|
||||
// case tag == typeMANY: TODO
|
||||
case tag == typePACKET_START:
|
||||
return errors.New("unexpected start of new document")
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown tag: %d (0x%x) at offset %d", tag, tag, idx)
|
||||
}
|
||||
}
|
||||
|
||||
for idx := range doc.trackTable {
|
||||
doc.trackIdxs = append(doc.trackIdxs, idx)
|
||||
}
|
||||
|
||||
sort.Ints(doc.trackIdxs)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Merger) mergeItems(doc *mergerDoc) error {
|
||||
mbuf := m.buf
|
||||
dbuf := doc.buf
|
||||
didx := doc.startIdx
|
||||
|
||||
expElements, offset, err := m.expectedElements(dbuf[didx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if expElements < 0 || expElements > math.MaxUint32 {
|
||||
return fmt.Errorf("bad amount of expected elements: %d", expElements)
|
||||
}
|
||||
|
||||
didx += offset
|
||||
|
||||
// stack is needed for three things:
|
||||
// - keep track of expected things
|
||||
// - verify document consistency
|
||||
// if a value put on stack has the highest significant bit on,
|
||||
// it means that hash keys/values are processed
|
||||
stack := make([]uint32, 0, 16) // preallocate 16 nested levels
|
||||
stack = append(stack, uint32(expElements))
|
||||
|
||||
LOOP:
|
||||
for didx < len(dbuf) {
|
||||
tag := dbuf[didx]
|
||||
tag &^= trackFlag
|
||||
|
||||
docRelativeIdx := didx - doc.bodyOffset
|
||||
mrgRelativeIdx := len(mbuf) - m.bodyOffset
|
||||
trackme := len(doc.trackIdxs) > 0 && doc.trackIdxs[0] == docRelativeIdx
|
||||
|
||||
level := len(stack) - 1
|
||||
for stack[level]&^hashKeysValuesFlag == 0 {
|
||||
stack = stack[:level]
|
||||
level--
|
||||
|
||||
if level < 0 {
|
||||
break LOOP
|
||||
}
|
||||
}
|
||||
|
||||
// If m.DedupeStrings is true - dedup all strings, otherwise dedup only hash keys and class names.
|
||||
// The trick with stack[level] % 2 == 0 works because stack[level] for hashes is always even at
|
||||
// the beggining (for each item in hash we expect key and value). In practise it means,
|
||||
// that if stack[level] is even - a key is being processed, if stack[level] is odd - value is being processed
|
||||
dedupString := m.DedupeStrings || ((stack[level]&hashKeysValuesFlag) == hashKeysValuesFlag && stack[level]%2 == 0)
|
||||
|
||||
//fmt.Printf("0x%x (0x%x) at didx: %d (rlt: %d) len(dbuf): %d\n", tag, dbuf[didx], didx, didx-doc.bodyOffset, len(dbuf))
|
||||
//fmt.Printf("level: %d, value: %d len: %d\n", level, stack[level], len(stack))
|
||||
//fmt.Println("------")
|
||||
|
||||
switch {
|
||||
case tag < typeVARINT, tag == typeUNDEF, tag == typeCANONICAL_UNDEF, tag == typeTRUE, tag == typeFALSE, tag == typeSHORT_BINARY_0:
|
||||
mbuf = append(mbuf, dbuf[didx])
|
||||
didx++
|
||||
|
||||
case tag == typePAD, tag == typeREFN, tag == typeWEAKEN, tag == typeEXTEND:
|
||||
// this elemets are fake ones, so stack counter should not be decreased
|
||||
// but, I don't want to create another if-branch, so fake it
|
||||
stack[level]++
|
||||
mbuf = append(mbuf, dbuf[didx])
|
||||
didx++
|
||||
|
||||
case tag == typeVARINT, tag == typeZIGZAG:
|
||||
_, sz, err := varintdecode(dbuf[didx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
mbuf = append(mbuf, dbuf[didx:didx+sz+1]...)
|
||||
didx += sz + 1
|
||||
|
||||
case tag == typeFLOAT:
|
||||
mbuf = append(mbuf, dbuf[didx:didx+5]...)
|
||||
didx += 5 // 4 bytes + tag
|
||||
|
||||
case tag == typeDOUBLE:
|
||||
mbuf = append(mbuf, dbuf[didx:didx+9]...)
|
||||
didx += 9 // 8 bytes + tag
|
||||
|
||||
case tag == typeLONG_DOUBLE:
|
||||
mbuf = append(mbuf, dbuf[didx:didx+17]...)
|
||||
didx += 17 // 16 bytes + tag
|
||||
|
||||
case tag == typeSHORT_BINARY_0+1:
|
||||
mbuf = append(mbuf, dbuf[didx:didx+2]...)
|
||||
didx += 2
|
||||
|
||||
case tag == typeBINARY, tag == typeSTR_UTF8, tag > typeSHORT_BINARY_0+1 && tag < typeSHORT_BINARY_0+32:
|
||||
// I don't want to call readString here because of performance reasons:
|
||||
// this path is the hot spot, so keep it overhead-free as much as possible
|
||||
|
||||
var ln, sz int
|
||||
if tag > typeSHORT_BINARY_0 {
|
||||
ln = int(tag & 0x1F) // get length from tag
|
||||
} else {
|
||||
var err error
|
||||
ln, sz, err = varintdecode(dbuf[didx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
length := sz + ln + 1
|
||||
if ln < 0 || ln > math.MaxUint32 {
|
||||
return fmt.Errorf("bad size for string: %d", ln)
|
||||
} else if didx+length > len(dbuf) {
|
||||
return fmt.Errorf("truncated document, expect %d bytes", len(dbuf)-didx-length)
|
||||
}
|
||||
|
||||
if dedupString {
|
||||
val := dbuf[didx+sz+1 : didx+length]
|
||||
if savedOffset, ok := m.strTable[string(val)]; ok {
|
||||
mbuf = appendTagVarint(mbuf, typeCOPY, uint(savedOffset))
|
||||
mrgRelativeIdx = savedOffset
|
||||
} else {
|
||||
m.strTable[string(val)] = mrgRelativeIdx
|
||||
mbuf = append(mbuf, dbuf[didx:didx+length]...)
|
||||
}
|
||||
} else {
|
||||
mbuf = append(mbuf, dbuf[didx:didx+length]...)
|
||||
}
|
||||
|
||||
didx += length
|
||||
|
||||
case tag == typeCOPY, tag == typeREFP, tag == typeALIAS,
|
||||
tag == typeOBJECTV, tag == typeOBJECTV_FREEZE:
|
||||
|
||||
offset, sz, err := varintdecode(dbuf[didx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
targetOffset, ok := doc.trackTable[offset]
|
||||
|
||||
if !ok || targetOffset < 0 {
|
||||
return errors.New("bad target offset at COPY, ALIAS or REFP tag")
|
||||
}
|
||||
|
||||
mbuf = appendTagVarint(mbuf, dbuf[didx], uint(targetOffset))
|
||||
didx += sz + 1
|
||||
|
||||
if tag == typeALIAS || tag == typeREFP {
|
||||
mbuf[targetOffset] |= trackFlag
|
||||
} else if tag == typeOBJECTV || tag == typeOBJECTV_FREEZE {
|
||||
stack = append(stack, 1)
|
||||
}
|
||||
|
||||
case tag == typeARRAY, tag == typeHASH:
|
||||
ln, sz, err := varintdecode(dbuf[didx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
|
||||
}
|
||||
if ln < 0 {
|
||||
return errors.New("bad array or hash length")
|
||||
}
|
||||
|
||||
mbuf = append(mbuf, dbuf[didx:didx+sz+1]...)
|
||||
didx += sz + 1
|
||||
|
||||
if tag == typeHASH {
|
||||
stack = append(stack, uint32(ln*2)|hashKeysValuesFlag)
|
||||
} else {
|
||||
stack = append(stack, uint32(ln))
|
||||
}
|
||||
|
||||
case (tag >= typeARRAYREF_0 && tag < typeARRAYREF_0+16) || (tag >= typeHASHREF_0 && tag < typeHASHREF_0+16):
|
||||
mbuf = append(mbuf, dbuf[didx])
|
||||
didx++
|
||||
|
||||
// for hash read 2*ln items
|
||||
if tag >= typeHASHREF_0 {
|
||||
stack = append(stack, uint32(tag&0xF*2)|hashKeysValuesFlag)
|
||||
} else {
|
||||
stack = append(stack, uint32(tag&0xF))
|
||||
}
|
||||
|
||||
case tag == typeREGEXP:
|
||||
offset, str, err := readString(dbuf[didx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sizeToCopy := offset + len(str) + 1
|
||||
offset, str, err = readString(dbuf[didx+sizeToCopy:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
sizeToCopy += offset + len(str)
|
||||
mbuf = append(mbuf, dbuf[didx:didx+sizeToCopy]...)
|
||||
didx += sizeToCopy
|
||||
|
||||
case tag == typeOBJECT, tag == typeOBJECT_FREEZE:
|
||||
// skip main tag for a second, and parse <STR-TAG>
|
||||
offset, str, err := readString(dbuf[didx+1:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
length := offset + len(str) + 1 // respect typeOBJECT tag
|
||||
if savedOffset, ok := m.objTable[string(str)]; ok {
|
||||
if tag == typeOBJECT {
|
||||
mbuf = appendTagVarint(mbuf, typeOBJECTV, uint(savedOffset))
|
||||
} else {
|
||||
mbuf = appendTagVarint(mbuf, typeOBJECTV_FREEZE, uint(savedOffset))
|
||||
}
|
||||
|
||||
mrgRelativeIdx = savedOffset
|
||||
} else {
|
||||
// +1 because we should refer to string tag, not object tag
|
||||
mrgRelativeIdx++
|
||||
m.objTable[string(str)] = mrgRelativeIdx
|
||||
mbuf = append(mbuf, dbuf[didx:didx+length]...)
|
||||
}
|
||||
|
||||
// parse <ITEM-TAG>
|
||||
stack = append(stack, 1)
|
||||
didx += length
|
||||
|
||||
case tag == typePACKET_START:
|
||||
return errors.New("unexpected start of new document")
|
||||
|
||||
default:
|
||||
// TODO typeMANY
|
||||
return fmt.Errorf("unknown tag: %d (0x%x) at offset %d", tag, tag, didx)
|
||||
}
|
||||
|
||||
stack[level]--
|
||||
|
||||
if trackme {
|
||||
// if tag is tracked, remember its offset
|
||||
doc.trackTable[docRelativeIdx] = mrgRelativeIdx
|
||||
doc.trackIdxs = doc.trackIdxs[1:]
|
||||
}
|
||||
}
|
||||
|
||||
m.length += expElements
|
||||
m.buf = mbuf
|
||||
return nil
|
||||
}
|
||||
|
||||
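// expectedElements inspects the leading tags of a document body and, when
// KeepFlat is enabled, returns the number of top-level elements together with
// the size in bytes of the container header to skip; otherwise (or when the
// tags don't match the configured TopLevelElement) it reports a single element.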
func (m *Merger) expectedElements(b []byte) (int, int, error) {
|
||||
if m.KeepFlat {
|
||||
tag0 := b[0] &^ trackFlag
|
||||
tag1 := b[1] &^ trackFlag
|
||||
|
||||
switch m.TopLevelElement {
|
||||
case TopLevelArray:
|
||||
if tag0 == typeARRAY {
|
||||
ln, sz, err := varintdecode(b[1:])
|
||||
return ln, sz + 1, err
|
||||
}
|
||||
|
||||
case TopLevelArrayRef:
|
||||
if tag0 == typeREFN && tag1 == typeARRAY {
|
||||
ln, sz, err := varintdecode(b[2:])
|
||||
return ln, sz + 2, err
|
||||
} else if tag0 >= typeARRAYREF_0 && tag0 < typeARRAYREF_0+16 {
|
||||
return int(tag0 & 0xF), 1, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return 1, 0, nil // by default expect only one element
|
||||
}
|
||||
|
||||
func isShallowStringish(tag byte) bool {
|
||||
return tag == typeBINARY || tag == typeSTR_UTF8 || (tag >= typeSHORT_BINARY_0 && tag < typeSHORT_BINARY_0+32)
|
||||
}
|
||||
|
||||
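// readString expects a BINARY, STR_UTF8 or SHORT_BINARY item at the start of
// buf and returns the offset of the string payload within buf along with the
// payload itself.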
func readString(buf []byte) (int, []byte, error) {
|
||||
tag := buf[0]
|
||||
tag &^= trackFlag
|
||||
|
||||
if !isShallowStringish(tag) {
|
||||
return 0, nil, fmt.Errorf("expected stringish but found %d (0x%x)", int(tag), int(tag))
|
||||
}
|
||||
|
||||
var ln, offset int
|
||||
if tag > typeSHORT_BINARY_0 {
|
||||
ln = int(tag & 0x1F) // get length from tag
|
||||
} else {
|
||||
var err error
|
||||
ln, offset, err = varintdecode(buf[1:])
|
||||
if err != nil {
|
||||
return 0, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
offset++ // respect tag itself
|
||||
if ln < 0 || ln > math.MaxUint32 {
|
||||
return 0, nil, fmt.Errorf("bad size for string: %d", ln)
|
||||
} else if offset+ln > len(buf) {
|
||||
return 0, nil, fmt.Errorf("truncated document, expect %d bytes", len(buf)-ln-offset)
|
||||
}
|
||||
|
||||
return offset, buf[offset : offset+ln], nil
|
||||
}
|
||||
|
||||
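// appendTagVarint appends tag followed by n encoded as an unsigned varint
// (7 bits per byte, high bit set on every byte except the last) and returns
// the extended slice.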
func appendTagVarint(by []byte, tag byte, n uint) []uint8 {
|
||||
// the slice should be allocated on stack due to escape analysis
|
||||
varintBuf := make([]byte, binary.MaxVarintLen64)
|
||||
varintBuf[0] = tag
|
||||
|
||||
idx := 1
|
||||
for n >= 0x80 {
|
||||
varintBuf[idx] = byte(n) | 0x80
|
||||
n >>= 7
|
||||
idx++
|
||||
}
|
||||
|
||||
varintBuf[idx] = byte(n)
|
||||
return append(by, varintBuf[:idx+1]...)
|
||||
}
|
|
@ -0,0 +1,39 @@
|
|||
package sereal
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func BenchmarkMerger(b *testing.B) {
|
||||
files, _ := filepath.Glob("data/*.srl")
|
||||
if files == nil {
|
||||
b.Fatal("no files found")
|
||||
}
|
||||
|
||||
var data [][]byte
|
||||
for _, file := range files {
|
||||
buf, err := ioutil.ReadFile(file)
|
||||
if err != nil {
|
||||
b.Fatal("failed to read file: " + file)
|
||||
}
|
||||
|
||||
data = append(data, buf)
|
||||
}
|
||||
|
||||
b.ResetTimer()
|
||||
|
||||
m := NewMerger()
|
||||
r := rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
for i := 0; i < b.N; i++ {
|
||||
buf := data[r.Int()%len(data)]
|
||||
_, err := m.Append(buf)
|
||||
if err != nil {
|
||||
b.Fatal(err)
|
||||
}
|
||||
}
|
||||
}
|
|
@ -0,0 +1,49 @@
|
|||
package sereal
|
||||
|
||||
// types for emulating perl data structures
|
||||
|
||||
// PerlObject represents a perl blessed reference
|
||||
type PerlObject struct {
|
||||
Class string
|
||||
Reference interface{}
|
||||
}
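// A hedged usage sketch (not exercised in this commit): with PerlCompat set,
// the Encoder can emit a value Perl will see as a blessed reference, e.g.
//
//	e := &Encoder{PerlCompat: true}
//	b, err := e.Marshal(&PerlObject{Class: "My::Class", Reference: map[string]interface{}{"id": 1}})
//
// "My::Class" and the payload map are placeholder values.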
|
||||
|
||||
// PerlAlias represents an aliased value
|
||||
type PerlAlias struct {
|
||||
Alias interface{}
|
||||
}
|
||||
|
||||
// PerlWeakRef represents a weak reference
|
||||
type PerlWeakRef struct {
|
||||
Reference interface{}
|
||||
}
|
||||
|
||||
// PerlUndef represents perl's "undef" value
|
||||
type PerlUndef struct {
|
||||
canonical bool
|
||||
}
|
||||
|
||||
// perlCanonicalUndef is the value that represents the perl's PL_sv_undef and
|
||||
// is encoded via the CANONICAL_UNDEF tag. It must be the only instance having
|
||||
// the canonical field set to true.
|
||||
var perlCanonicalUndef = &PerlUndef{canonical: true}
|
||||
|
||||
// PerlCanonicalUndef returns a value that represents perl's shared undef (PL_sv_undef).
|
||||
//
|
||||
// For more details see
|
||||
// https://github.com/Sereal/Sereal/blob/master/sereal_spec.pod#user-content-dealing-with-undefined-values
|
||||
func PerlCanonicalUndef() *PerlUndef {
|
||||
return perlCanonicalUndef
|
||||
}
|
||||
|
||||
// PerlRegexp represents a perl regular expression
|
||||
type PerlRegexp struct {
|
||||
Pattern []byte
|
||||
Modifiers []byte
|
||||
}
|
||||
|
||||
// PerlFreeze represents an object's custom Freeze implementation
|
||||
type PerlFreeze struct {
|
||||
Class string
|
||||
Data []byte
|
||||
}
|
|
@ -0,0 +1,904 @@
|
|||
package sereal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/davecgh/go-spew/spew"
|
||||
)
|
||||
|
||||
var roundtrips = []interface{}{
|
||||
true,
|
||||
false,
|
||||
1,
|
||||
10,
|
||||
100,
|
||||
200,
|
||||
300,
|
||||
0,
|
||||
-1,
|
||||
-15,
|
||||
15,
|
||||
-16,
|
||||
16,
|
||||
17,
|
||||
-17,
|
||||
-2613115362782646504,
|
||||
uint(0xdbbc596c24396f18),
|
||||
"hello",
|
||||
"hello, world",
|
||||
"twas brillig and the slithy toves and gyre and gimble in the wabe",
|
||||
float32(2.2),
|
||||
float32(9891234567890.098),
|
||||
float64(2.2),
|
||||
float64(9891234567890.098),
|
||||
[]interface{}{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14},
|
||||
[]interface{}{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15},
|
||||
[]interface{}{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16},
|
||||
[]interface{}{1, 100, 1000, 2000, 0xdeadbeef, float32(2.2), "hello, world", map[string]interface{}{"foo": []interface{}{1, 2, 3}}},
|
||||
map[string]interface{}{"foo": 1, "bar": 2, "baz": "qux"},
|
||||
}
|
||||
|
||||
func TestRoundtripGo(t *testing.T) {
|
||||
testRoundtrip(t, false, 1)
|
||||
testRoundtrip(t, false, 2)
|
||||
}
|
||||
|
||||
func TestRoundtripPerl(t *testing.T) {
|
||||
testRoundtrip(t, true, 1)
|
||||
testRoundtrip(t, true, 2)
|
||||
}
|
||||
|
||||
func testRoundtrip(t *testing.T, perlCompat bool, version int) {
|
||||
|
||||
e := &Encoder{PerlCompat: perlCompat, version: version}
|
||||
d := &Decoder{PerlCompat: false}
|
||||
|
||||
for _, v := range roundtrips {
|
||||
b, err := e.Marshal(v)
|
||||
if err != nil {
|
||||
t.Errorf("failed marshalling with perlCompat=%t : %v: %s\n", perlCompat, v, err)
|
||||
}
|
||||
var unp interface{}
|
||||
|
||||
err = d.Unmarshal(b, &unp)
|
||||
if err != nil {
|
||||
t.Errorf("error during unmarshal (perlCompat=%t): %s\n", perlCompat, err)
|
||||
}
|
||||
if !reflect.DeepEqual(v, unp) {
|
||||
t.Errorf("failed roundtripping with perlCompat=%t: %#v: got %#v\n", perlCompat, v, unp)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestRoundtripCompat(t *testing.T) {
|
||||
|
||||
input := []interface{}{map[string]interface{}{"foo": []interface{}{1, 2, 3}}}
|
||||
expectGo := []interface{}{map[string]interface{}{"foo": []interface{}{1, 2, 3}}}
|
||||
expectPerlCompat := &[]interface{}{&map[string]interface{}{"foo": &[]interface{}{1, 2, 3}}}
|
||||
|
||||
e := &Encoder{}
|
||||
d := &Decoder{}
|
||||
|
||||
noCompat, _ := e.Marshal(input)
|
||||
|
||||
e.PerlCompat = true
|
||||
perlCompat, _ := e.Marshal(input)
|
||||
|
||||
// no perl compat on encode, no perl compat on decode
|
||||
var nono interface{}
|
||||
err := d.Unmarshal(noCompat, &nono)
|
||||
if err != nil {
|
||||
t.Errorf("perl compat: error during unmarshal: %s\n", err)
|
||||
}
|
||||
if !reflect.DeepEqual(expectGo, nono) {
|
||||
t.Errorf("perl compat: no no failed: got %#v\n", nono)
|
||||
}
|
||||
|
||||
// perl compat on encode, no perl compat on decode
|
||||
var yesno interface{}
|
||||
err = d.Unmarshal(perlCompat, &yesno)
|
||||
if err != nil {
|
||||
t.Errorf("perl compat: error during unmarshal: %s\n", err)
|
||||
}
|
||||
if !reflect.DeepEqual(expectGo, yesno) {
|
||||
t.Errorf("perl compat: yes no failed: got %#v\n", yesno)
|
||||
}
|
||||
|
||||
d.PerlCompat = true
|
||||
|
||||
// no perl compat on encode, perl compat on decode
|
||||
var noyes interface{}
|
||||
err = d.Unmarshal(noCompat, &noyes)
|
||||
if err != nil {
|
||||
t.Errorf("perl compat: error during unmarshal: %s\n", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectGo, noyes) {
|
||||
t.Errorf("perl compat: no yes failed: got %#v\n", noyes)
|
||||
}
|
||||
|
||||
// perl compat on encode, yes perl compat on decode
|
||||
var yesyes interface{}
|
||||
|
||||
err = d.Unmarshal(perlCompat, &yesyes)
|
||||
if err != nil {
|
||||
t.Errorf("perl compat: error during unmarshal: %s\n", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(expectPerlCompat, yesyes) {
|
||||
t.Errorf("perl compat: yes yes failed: got %#v\n", yesyes)
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* To make the corpus of test files:
|
||||
* perl -I Perl/shared/t/lib/ -MSereal::TestSet -MSereal::Encoder -e'Sereal::TestSet::write_test_files("test_dir")'
|
||||
*
|
||||
* This runs the Decoder/Encoder over every file in the supplied directory and tells you when the bytes the encoder
|
||||
* outputs do not match the bytes in the test file. The purpose is to check if roundtripping to Perl type
|
||||
* datastructures works.
|
||||
*
|
||||
* If you pass a file as parameter it will do the same but do more detailed logging.
|
||||
*
|
||||
*/
|
||||
func TestCorpus(t *testing.T) {
|
||||
|
||||
e := &Encoder{PerlCompat: true}
|
||||
d := &Decoder{PerlCompat: true}
|
||||
|
||||
_ = e
|
||||
|
||||
debug := false
|
||||
|
||||
corpusFiles, err := filepath.Glob("test_dir/test_data_?????")
|
||||
|
||||
// corpusFiles, err = filepath.Glob("test_dir/test_data_00028")
|
||||
// debug = true
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("error opening test_dir: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
for _, corpusFile := range corpusFiles {
|
||||
|
||||
contents, err := ioutil.ReadFile(corpusFile)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("error opening test_dir/%s: %v", corpusFile, err)
|
||||
return
|
||||
}
|
||||
|
||||
var value interface{}
|
||||
|
||||
if debug {
|
||||
t.Log("unmarshalling..")
|
||||
t.Log(hex.Dump(contents))
|
||||
}
|
||||
err = d.Unmarshal(contents, &value)
|
||||
|
||||
if debug {
|
||||
t.Log("done")
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("unpacking %s generated an error: %v", corpusFile, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if debug {
|
||||
t.Log("marshalling")
|
||||
t.Log("value=", spew.Sdump(value))
|
||||
t.Logf(" =%#v\n", value)
|
||||
}
|
||||
b, err := e.Marshal(value)
|
||||
|
||||
if debug {
|
||||
t.Log("done")
|
||||
t.Log(hex.Dump(b))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("packing %s generated an error: %v", corpusFile, err)
|
||||
continue
|
||||
}
|
||||
|
||||
ioutil.WriteFile(corpusFile+"-go.out", b, 0600)
|
||||
}
|
||||
}
|
||||
|
||||
func TestSnappyArray(t *testing.T) {
|
||||
|
||||
e := &Encoder{}
|
||||
d := &Decoder{}
|
||||
|
||||
// test many duplicated strings -- this uses both the string table and snappy compression
|
||||
// this ensures we're not messing up the offsets when decoding
|
||||
|
||||
manydups := make([]string, 2048)
|
||||
for i := 0; i < len(manydups); i++ {
|
||||
manydups[i] = "hello, world " + strconv.Itoa(i%10)
|
||||
}
|
||||
|
||||
encoded, err := e.Marshal(manydups)
|
||||
if err != nil {
|
||||
t.Errorf("encoding a large array generated an error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
e.Compression = SnappyCompressor{Incremental: true}
|
||||
e.CompressionThreshold = 0 // always compress
|
||||
snencoded, err := e.Marshal(manydups)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("snappy encoding a large array generated an error: %v", err)
|
||||
}
|
||||
|
||||
if len(encoded) <= len(snencoded) {
|
||||
t.Fatalf("snappy failed to compress redundant array: encoded=%d snappy=%d\n", len(encoded), len(snencoded))
|
||||
}
|
||||
|
||||
var decoded []string
|
||||
err = d.Unmarshal(snencoded, &decoded)
|
||||
if err != nil {
|
||||
t.Fatalf("snappy decoding generated error: %v", err)
|
||||
}
|
||||
|
||||
if len(decoded) != 2048 {
|
||||
t.Fatalf("got wrong number of elements back: wanted=%d got=%d\n", len(manydups), len(decoded))
|
||||
}
|
||||
|
||||
for i := 0; i < 2048; i++ {
|
||||
s := decoded[i]
|
||||
expected := "hello, world " + strconv.Itoa(i%10)
|
||||
if s != expected {
|
||||
t.Errorf("failed decompressing many-dup string: s=%s expected=%s", s, expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestStructs(t *testing.T) {
|
||||
|
||||
type A struct {
|
||||
Name string
|
||||
Phone string
|
||||
Siblings int
|
||||
Spouse bool
|
||||
Money float64
|
||||
}
|
||||
|
||||
// some people
|
||||
Afoo := A{"mr foo", "12345", 10, true, 123.45}
|
||||
Abar := A{"mr bar", "54321", 5, false, 321.45}
|
||||
Abaz := A{"mr baz", "15243", 20, true, 543.21}
|
||||
|
||||
type nested1 struct {
|
||||
Person A
|
||||
}
|
||||
|
||||
type nested struct {
|
||||
Nested1 nested1
|
||||
}
|
||||
|
||||
type private struct {
|
||||
pbool bool
|
||||
pstr string
|
||||
pint int
|
||||
}
|
||||
|
||||
type semiprivate struct {
|
||||
Bool bool
|
||||
pbool bool
|
||||
String string
|
||||
pstr string
|
||||
pint int
|
||||
}
|
||||
|
||||
type ATags struct {
|
||||
Name string `sereal:"Phone"`
|
||||
Phone string `sereal:"Name"`
|
||||
Siblings int // no tag, isn't unpacked
|
||||
}
|
||||
|
||||
type ALowerTags struct {
|
||||
Name string `sereal:"name"`
|
||||
Phone string `sereal:"phone"`
|
||||
}
|
||||
|
||||
tests := []struct {
|
||||
what string
|
||||
input interface{}
|
||||
outvar interface{}
|
||||
expected interface{}
|
||||
}{
|
||||
{
|
||||
"struct with fields",
|
||||
Afoo,
|
||||
A{},
|
||||
Afoo,
|
||||
},
|
||||
{
|
||||
"struct with fields into map",
|
||||
Afoo,
|
||||
map[string]interface{}{},
|
||||
map[string]interface{}{
|
||||
"Name": "mr foo",
|
||||
"Phone": "12345",
|
||||
"Siblings": 10,
|
||||
"Spouse": true,
|
||||
"Money": 123.45,
|
||||
},
|
||||
},
|
||||
{
|
||||
"decode struct with tags",
|
||||
Afoo,
|
||||
ATags{},
|
||||
ATags{Name: "12345", Phone: "mr foo", Siblings: 0},
|
||||
},
|
||||
{
|
||||
"encode struct with tags",
|
||||
ATags{Name: "12345", Phone: "mr foo", Siblings: 10},
|
||||
A{},
|
||||
A{Name: "mr foo", Phone: "12345"},
|
||||
},
|
||||
{
|
||||
"decode struct with lower-case field names",
|
||||
ALowerTags{Name: "mr foo", Phone: "12345"},
|
||||
A{},
|
||||
A{Name: "mr foo", Phone: "12345"},
|
||||
},
|
||||
{
|
||||
"struct with private fields",
|
||||
private{false, "hello", 3},
|
||||
private{}, // zero value for struct
|
||||
private{},
|
||||
},
|
||||
{
|
||||
"semi-private struct",
|
||||
semiprivate{Bool: true, pbool: false, String: "world", pstr: "hello", pint: 3},
|
||||
semiprivate{},
|
||||
semiprivate{Bool: true, String: "world"},
|
||||
},
|
||||
{
|
||||
"nil slice of structs",
|
||||
[]A{Afoo, Abar, Abaz},
|
||||
[]A(nil),
|
||||
[]A{Afoo, Abar, Abaz},
|
||||
},
|
||||
{
|
||||
"0-length slice of structs",
|
||||
[]A{Afoo, Abar, Abaz},
|
||||
[]A{},
|
||||
[]A{Afoo, Abar, Abaz},
|
||||
},
|
||||
{
|
||||
"1-length slice of structs",
|
||||
[]A{Afoo, Abar, Abaz},
|
||||
[]A{A{}},
|
||||
[]A{Afoo},
|
||||
},
|
||||
{
|
||||
"nested",
|
||||
nested{nested1{Afoo}},
|
||||
nested{},
|
||||
nested{nested1{Afoo}},
|
||||
},
|
||||
}
|
||||
|
||||
e := &Encoder{}
|
||||
d := &Decoder{}
|
||||
|
||||
for _, v := range tests {
|
||||
|
||||
rinput := reflect.ValueOf(v.input)
|
||||
|
||||
x, err := e.Marshal(rinput.Interface())
|
||||
if err != nil {
|
||||
t.Errorf("error marshalling %s: %s\n", v.what, err)
|
||||
continue
|
||||
}
|
||||
|
||||
routvar := reflect.New(reflect.TypeOf(v.outvar))
|
||||
routvar.Elem().Set(reflect.ValueOf(v.outvar))
|
||||
|
||||
err = d.Unmarshal(x, routvar.Interface())
|
||||
if err != nil {
|
||||
t.Errorf("error unmarshalling %s: %s\n", v.what, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(routvar.Elem().Interface(), v.expected) {
|
||||
t.Errorf("roundtrip mismatch for %s: got: %#v expected: %#v\n", v.what, routvar.Elem().Interface(), v.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestDecodeToStruct(t *testing.T) {
|
||||
type obj struct {
|
||||
ValueStr string
|
||||
ValueByte []byte
|
||||
ValueInt int
|
||||
ValueSlice []float32
|
||||
ValueHash map[string][]byte
|
||||
}
|
||||
|
||||
exp := make([]obj, 3)
|
||||
exp[0] = obj{
|
||||
ValueStr: "string as string value which actually should be 32+ characters",
|
||||
ValueByte: []byte("string as binary value"),
|
||||
ValueInt: 10,
|
||||
ValueSlice: []float32{1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0, 17.0},
|
||||
ValueHash: map[string][]byte{
|
||||
"key1": []byte("unique value"),
|
||||
"key2": []byte("duplicate value"),
|
||||
"key3": []byte("deplicate value"),
|
||||
},
|
||||
}
|
||||
|
||||
exp[1] = obj{
|
||||
ValueStr: "another string as string value which actually should be 32+ characters",
|
||||
ValueByte: []byte("another string as binary value"),
|
||||
ValueInt: -10,
|
||||
ValueSlice: []float32{18.0, 19.0, 20.0},
|
||||
ValueHash: map[string][]byte{
|
||||
"key1": []byte("unique value"),
|
||||
"key2": []byte("duplicate value"),
|
||||
"key3": []byte("deplicate value"),
|
||||
},
|
||||
}
|
||||
|
||||
exp[2] = exp[0]
|
||||
|
||||
filename := "test_dir/test-decode-struct.srl"
|
||||
content, err := ioutil.ReadFile(filename)
|
||||
|
||||
if err != nil {
|
||||
t.Skip("run 'make test_files' and try again")
|
||||
return
|
||||
}
|
||||
|
||||
var slice []obj
|
||||
d := NewDecoder()
|
||||
|
||||
if err := d.Unmarshal(content, &slice); err != nil {
|
||||
t.Errorf("error unmarshalling: %s", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(exp, slice) {
|
||||
t.Errorf("failed decode into struct:\n\nexp: %#v:\n\ngot %#v\n", exp, slice)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStructsWithPtrs(t *testing.T) {
|
||||
type First struct{ I int }
|
||||
type Second struct{ S string }
|
||||
type NestedPtr struct {
|
||||
A *First
|
||||
B *Second
|
||||
}
|
||||
tests := []struct {
|
||||
what string
|
||||
input interface{}
|
||||
outvar interface{}
|
||||
expected interface{}
|
||||
}{
|
||||
{
|
||||
"struct with two fields of different types",
|
||||
NestedPtr{&First{1}, &Second{"two"}},
|
||||
NestedPtr{},
|
||||
NestedPtr{&First{1}, &Second{"two"}},
|
||||
},
|
||||
{
|
||||
"struct with two nils of different types",
|
||||
NestedPtr{},
|
||||
NestedPtr{},
|
||||
NestedPtr{},
|
||||
},
|
||||
}
|
||||
|
||||
e := &Encoder{}
|
||||
d := &Decoder{}
|
||||
|
||||
for _, v := range tests {
|
||||
|
||||
rinput := reflect.ValueOf(v.input)
|
||||
|
||||
x, err := e.Marshal(rinput.Interface())
|
||||
if err != nil {
|
||||
t.Errorf("error marshalling %s: %s\n", v.what, err)
|
||||
continue
|
||||
}
|
||||
|
||||
routvar := reflect.New(reflect.TypeOf(v.outvar))
|
||||
routvar.Elem().Set(reflect.ValueOf(v.outvar))
|
||||
|
||||
err = d.Unmarshal(x, routvar.Interface())
|
||||
if err != nil {
|
||||
t.Errorf("error unmarshalling %s: %s\n", v.what, err)
|
||||
continue
|
||||
}
|
||||
|
||||
for i := 0; i < routvar.Elem().NumField(); i++ {
|
||||
outfield := routvar.Elem().Field(i)
|
||||
outfield.Interface()
|
||||
expfield := reflect.ValueOf(v.expected).Field(i)
|
||||
|
||||
if !reflect.DeepEqual(outfield.Interface(), expfield.Interface()) {
|
||||
t.Errorf("roundtrip mismatch for %s: got: %#v expected: %#v\n", v.what, outfield.Interface(), expfield.Interface())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type ErrorBinaryUnmarshaler int
|
||||
|
||||
var errUnmarshaler = errors.New("error binary unmarshaler")
|
||||
|
||||
func (e *ErrorBinaryUnmarshaler) UnmarshalBinary(data []byte) error {
|
||||
return errUnmarshaler
|
||||
}
|
||||
|
||||
func TestBinaryMarshaller(t *testing.T) {
|
||||
|
||||
// our data
|
||||
now := time.Now()
|
||||
|
||||
e := &Encoder{}
|
||||
d := &Decoder{}
|
||||
|
||||
x, err := e.Marshal(now)
|
||||
if err != nil {
|
||||
t.Errorf("error marshalling %s", err)
|
||||
}
|
||||
|
||||
var tm time.Time
|
||||
|
||||
// unpack into something that expects the bytes
|
||||
err = d.Unmarshal(x, &tm)
|
||||
|
||||
if err != nil {
|
||||
t.Errorf("error unmarshalling: %s", err)
|
||||
}
|
||||
|
||||
if !now.Equal(tm) {
|
||||
t.Errorf("failed unpacking: got=%v wanted=%v\n", tm, now)
|
||||
}
|
||||
|
||||
// unpack into something that produces an error
|
||||
var errunmarshaler ErrorBinaryUnmarshaler
|
||||
err = d.Unmarshal(x, &errunmarshaler)
|
||||
if err == nil {
|
||||
t.Errorf("failed propagating error from unmarshaler")
|
||||
}
|
||||
|
||||
// unpack into something that isn't a marshaller
|
||||
var i int
|
||||
err = d.Unmarshal(x, &i)
|
||||
if err == nil {
|
||||
t.Errorf("failed to generate error trying to unpack into non-slice/unmarshaler")
|
||||
}
|
||||
|
||||
// unpack into a byte slice
|
||||
bdata, _ := now.MarshalBinary()
|
||||
|
||||
var data []byte
|
||||
err = d.Unmarshal(x, &data)
|
||||
|
||||
if !bytes.Equal(bdata, data) {
|
||||
t.Errorf("failed unpacking into byte-slice: got=%v wanted=%v\n", data, bdata)
|
||||
}
|
||||
|
||||
// unpack into a nil interface
|
||||
var intf interface{}
|
||||
err = d.Unmarshal(x, &intf)
|
||||
|
||||
var pfreeze *PerlFreeze
|
||||
var ok bool
|
||||
|
||||
if pfreeze, ok = intf.(*PerlFreeze); !ok {
|
||||
t.Errorf("failed unpacking into nil interface : got=%v", intf)
|
||||
}
|
||||
|
||||
if pfreeze.Class != "time.Time" || !bytes.Equal(pfreeze.Data, bdata) {
|
||||
t.Errorf("failed unpacking into nil interface : got=%v", pfreeze)
|
||||
}
|
||||
|
||||
// check that registering a type works
|
||||
var registerTime time.Time
|
||||
d.RegisterName("time.Time", ®isterTime)
|
||||
|
||||
// unpack into a nil interface should return a time.Time
|
||||
var tintf interface{}
|
||||
err = d.Unmarshal(x, &tintf)
|
||||
if err != nil {
|
||||
t.Errorf("error unpacking registered type: %s", err)
|
||||
}
|
||||
|
||||
var rtime *time.Time
|
||||
if rtime, ok = tintf.(*time.Time); ok {
|
||||
if !now.Equal(*rtime) {
|
||||
t.Errorf("failed unpacking registered type: got=%v wanted=%v\n", rtime, now)
|
||||
}
|
||||
} else {
|
||||
t.Errorf("failed unpacking registered nil interface : got=%v", tintf)
|
||||
}
|
||||
|
||||
// overwrite with our error type
|
||||
d.RegisterName("time.Time", &errunmarshaler)
|
||||
var eintf interface{}
|
||||
|
||||
err = d.Unmarshal(x, &eintf)
|
||||
if err != errUnmarshaler {
|
||||
t.Errorf("failed to error unpacking registered error type: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestUnmarshalHeaderError(t *testing.T) {
|
||||
|
||||
testcases := []struct {
|
||||
docHex string
|
||||
err error
|
||||
}{
|
||||
// Garbage
|
||||
{"badbadbadbad", ErrBadHeader},
|
||||
// Version 1 and 2, "=srl"
|
||||
{"3d73726c0100", nil},
|
||||
{"3d73726c0200", nil},
|
||||
// Version 3, "=srl" with a high-bit-set-on-the-"s"
|
||||
{"3df3726c0300", nil},
|
||||
// Version 3, "=srl" corrupted by accidental UTF8 encoding
|
||||
{"3dc3b3726c0300", ErrBadHeaderUTF8},
|
||||
// Forbidden version 2 and high-bit-set-on-the-"s" combination
|
||||
{"3df3726c0200", ErrBadHeader},
|
||||
// Forbidden version 3 and obsolete "=srl" magic string
|
||||
{"3d73726c0300", ErrBadHeader},
|
||||
// Non-existing (yet) version 4, "=srl" with a high-bit-set-on-the-"s"
|
||||
{"3df3726c0400", errors.New("document version '4' not yet supported")},
|
||||
}
|
||||
|
||||
d := NewDecoder()
|
||||
|
||||
for i, tc := range testcases {
|
||||
doc, err := hex.DecodeString(tc.docHex)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
continue
|
||||
}
|
||||
|
||||
got := d.UnmarshalHeaderBody(doc, nil, nil)
|
||||
wanted := tc.err
|
||||
|
||||
ok := false
|
||||
ok = ok || (got == nil && wanted == nil)
|
||||
ok = ok || (got != nil && wanted != nil && got.Error() == wanted.Error())
|
||||
if !ok {
|
||||
t.Errorf("test case #%v:\ngot : %v\nwanted: %v", i, got, wanted)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPrepareFreezeRoundtrip(t *testing.T) {
|
||||
_, err := os.Stat("test_freeze")
|
||||
if os.IsNotExist(err) {
|
||||
return
|
||||
}
|
||||
|
||||
now := time.Now()
|
||||
|
||||
type StructWithTime struct{ time.Time }
|
||||
|
||||
tests := []struct {
|
||||
what string
|
||||
input interface{}
|
||||
outvar interface{}
|
||||
expected interface{}
|
||||
}{
|
||||
|
||||
{
|
||||
"Time",
|
||||
now,
|
||||
time.Time{},
|
||||
now,
|
||||
},
|
||||
{
|
||||
"Time_ptr",
|
||||
&now,
|
||||
&time.Time{},
|
||||
&now,
|
||||
},
|
||||
{
|
||||
"struct_Time",
|
||||
StructWithTime{now},
|
||||
StructWithTime{},
|
||||
StructWithTime{now},
|
||||
},
|
||||
{
|
||||
"struct_Time_ptr",
|
||||
&StructWithTime{now},
|
||||
&StructWithTime{},
|
||||
&StructWithTime{now},
|
||||
},
|
||||
}
|
||||
|
||||
for _, compat := range []bool{false, true} {
|
||||
for _, v := range tests {
|
||||
e := Encoder{PerlCompat: compat}
|
||||
d := Decoder{}
|
||||
|
||||
var name string
|
||||
if compat {
|
||||
name = "compat_" + v.what
|
||||
} else {
|
||||
name = v.what
|
||||
}
|
||||
|
||||
rinput := reflect.ValueOf(v.input)
|
||||
|
||||
x, err := e.Marshal(rinput.Interface())
|
||||
if err != nil {
|
||||
t.Errorf("error marshalling %s: %s\n", v.what, err)
|
||||
continue
|
||||
}
|
||||
|
||||
err = ioutil.WriteFile("test_freeze/"+name+"-go.out", x, 0600)
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
routvar := reflect.New(reflect.TypeOf(v.outvar))
|
||||
routvar.Elem().Set(reflect.ValueOf(v.outvar))
|
||||
|
||||
err = d.Unmarshal(x, routvar.Interface())
|
||||
if err != nil {
|
||||
t.Errorf("error unmarshalling %s: %s\n", v.what, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(routvar.Elem().Interface(), v.expected) {
|
||||
t.Errorf("roundtrip mismatch for %s: got: %#v expected: %#v\n", v.what, routvar.Elem().Interface(), v.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestFreezeRoundtrip(t *testing.T) {
|
||||
if os.Getenv("RUN_FREEZE") == "1" {
|
||||
d := Decoder{}
|
||||
|
||||
buf, err := ioutil.ReadFile("test_freeze/Time-go.out")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
var then time.Time
|
||||
d.Unmarshal(buf, &then)
|
||||
|
||||
type StructWithTime struct{ time.Time }
|
||||
tests := []struct {
|
||||
what string
|
||||
outvar interface{}
|
||||
expected interface{}
|
||||
}{
|
||||
|
||||
{
|
||||
"Time",
|
||||
time.Time{},
|
||||
then,
|
||||
},
|
||||
{
|
||||
"Time_ptr",
|
||||
&time.Time{},
|
||||
&then,
|
||||
},
|
||||
{
|
||||
"struct_Time",
|
||||
StructWithTime{},
|
||||
StructWithTime{then},
|
||||
},
|
||||
{
|
||||
"struct_Time_ptr",
|
||||
&StructWithTime{},
|
||||
&StructWithTime{then},
|
||||
},
|
||||
}
|
||||
|
||||
for _, v := range tests {
|
||||
for _, compat := range []string{"", "compat_"} {
|
||||
x, err := ioutil.ReadFile("test_freeze/" + compat + v.what + "-perl.out")
|
||||
if err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
routvar := reflect.New(reflect.TypeOf(v.outvar))
|
||||
routvar.Elem().Set(reflect.ValueOf(v.outvar))
|
||||
|
||||
err = d.Unmarshal(x, routvar.Interface())
|
||||
if err != nil {
|
||||
t.Errorf("error unmarshalling %s: %s\n", v.what, err)
|
||||
continue
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(routvar.Elem().Interface(), v.expected) {
|
||||
t.Errorf("roundtrip mismatch for %s: got: %#v expected: %#v\n", v.what, routvar.Elem().Interface(), v.expected)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue130(t *testing.T) {
|
||||
t.Skip("Issue 130")
|
||||
|
||||
type AStructType struct {
|
||||
EmptySlice []*AStructType
|
||||
EmptySlice2 []AStructType
|
||||
}
|
||||
|
||||
t1 := &AStructType{}
|
||||
|
||||
b, err := Marshal(t1)
|
||||
if err != nil {
|
||||
t.Fatal("failed to marshal:", err)
|
||||
}
|
||||
|
||||
t12 := &AStructType{}
|
||||
err = Unmarshal(b, &t12)
|
||||
if err != nil {
|
||||
t.Fatal("failed to unmarshal:", err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(t1, t12) {
|
||||
t.Errorf("roundtrip slice pointers failed\nwant\n%#v\ngot\n%#v", t1, t12)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue131(t *testing.T) {
|
||||
type A struct {
|
||||
T *time.Time
|
||||
}
|
||||
|
||||
t0 := time.Now()
|
||||
a := A{T: &t0}
|
||||
|
||||
b, err := Marshal(&a)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var decoded A
|
||||
err = Unmarshal(b, &decoded)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIssue135(t *testing.T) {
|
||||
type A struct {
|
||||
M map[string][]int
|
||||
}
|
||||
|
||||
u := A{M: make(map[string][]int)}
|
||||
|
||||
u.M["k99"] = []int{1, 2, 3}
|
||||
|
||||
b, err := Marshal(&u)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
var decoded A
|
||||
err = Unmarshal(b, &decoded)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,53 @@
|
|||
package sereal
|
||||
|
||||
import "math"
|
||||
|
||||
// SnappyCompressor compresses a Sereal document using the Snappy format.
|
||||
type SnappyCompressor struct {
|
||||
Incremental bool // enable incremental parsing
|
||||
}
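// Typical usage, as exercised by TestSnappyArray: attach the compressor to an
// Encoder and lower the threshold so even small documents are compressed:
//
//	e := &Encoder{}
//	e.Compression = SnappyCompressor{Incremental: true}
//	e.CompressionThreshold = 0 // always compress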
|
||||
|
||||
func (c SnappyCompressor) compress(b []byte) ([]byte, error) {
|
||||
// XXX this could be more efficient! I'm creating a new buffer to
|
||||
// store the compressed document, which isn't necessary. You
|
||||
// could probably write directly to the slice after the header
|
||||
// and after the varint holding the length
|
||||
|
||||
if len(b) >= math.MaxUint32 {
|
||||
return nil, ErrTooLarge
|
||||
}
|
||||
|
||||
compressed := snappyEncode(nil, b)
|
||||
|
||||
if c.Incremental {
|
||||
// shrink down b to reuse the allocated buffer
|
||||
b = b[:0]
|
||||
b = varint(b, uint(len(compressed)))
|
||||
b = append(b, compressed...)
|
||||
} else {
|
||||
b = compressed
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
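// decompress reverses compress: with Incremental set it first reads and
// bounds-checks the varint length prefix, then snappy-decompresses the
// remaining payload.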
func (c SnappyCompressor) decompress(b []byte) ([]byte, error) {
|
||||
if c.Incremental {
|
||||
ln, sz, err := varintdecode(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if ln < 0 || sz+ln > len(b) || ln > math.MaxInt32 {
|
||||
return nil, ErrCorrupt{errBadOffset}
|
||||
}
|
||||
b = b[sz : sz+ln]
|
||||
}
|
||||
|
||||
decompressed, err := snappyDecode(nil, b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return decompressed, nil
|
||||
}
|
|
@ -0,0 +1,9 @@
|
|||
// +build clibs
|
||||
|
||||
package sereal
|
||||
|
||||
import snappy "github.com/dgryski/go-csnappy"
|
||||
|
||||
func snappyEncode(dst, src []byte) []byte { return snappy.Encode(dst, src) }
|
||||
|
||||
func snappyDecode(dst, src []byte) ([]byte, error) { return snappy.Decode(dst, src) }
|
|
@ -0,0 +1,9 @@
|
|||
// +build !clibs
|
||||
|
||||
package sereal
|
||||
|
||||
import "github.com/golang/snappy"
|
||||
|
||||
func snappyEncode(dst, src []byte) []byte { return snappy.Encode(dst, src) }
|
||||
|
||||
func snappyDecode(dst, src []byte) ([]byte, error) { return snappy.Decode(dst, src) }
|
|
@ -0,0 +1,57 @@
|
|||
package sereal
|
||||
|
||||
import "reflect"
|
||||
|
||||
type tagsCache struct {
|
||||
cmap map[reflect.Type]map[string]int
|
||||
}
|
||||
|
||||
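// Get returns, for a struct value, a map from wire field name to field index.
// Names come from `sereal` struct tags when any are present, otherwise from
// exported field names; the result is cached per type, and nil is returned for
// non-structs or types with no usable fields.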
func (tc *tagsCache) Get(ptr reflect.Value) map[string]int {
|
||||
if ptr.Kind() != reflect.Struct {
|
||||
return nil
|
||||
}
|
||||
|
||||
if tc.cmap == nil {
|
||||
tc.cmap = make(map[reflect.Type]map[string]int)
|
||||
}
|
||||
|
||||
ptrType := ptr.Type()
|
||||
if m, ok := tc.cmap[ptrType]; ok {
|
||||
return m
|
||||
}
|
||||
|
||||
numTags := 0
|
||||
m := make(map[string]int)
|
||||
|
||||
l := ptrType.NumField()
|
||||
for i := 0; i < l; i++ {
|
||||
field := ptrType.Field(i).Tag.Get("sereal")
|
||||
if field != "" {
|
||||
m[field] = i
|
||||
numTags++
|
||||
}
|
||||
}
|
||||
|
||||
if numTags != 0 {
|
||||
tc.cmap[ptrType] = m
|
||||
return m
|
||||
}
|
||||
|
||||
// build one from the public names
|
||||
for i := 0; i < l; i++ {
|
||||
pkgpath := ptrType.Field(i).PkgPath
|
||||
if pkgpath == "" { // exported
|
||||
field := ptrType.Field(i).Name
|
||||
m[field] = i
|
||||
numTags++
|
||||
}
|
||||
}
|
||||
|
||||
if numTags != 0 {
|
||||
tc.cmap[ptrType] = m
|
||||
return m
|
||||
}
|
||||
|
||||
tc.cmap[ptrType] = nil
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,346 @@
|
|||
#!/usr/bin/env perl
|
||||
|
||||
use strict;
|
||||
use warnings;
|
||||
|
||||
use blib "../../Perl/Decoder/blib/";
|
||||
use blib "../../Perl/Encoder/blib/";
|
||||
use lib "../../Perl/shared/t/lib/";
|
||||
|
||||
use Sereal::Decoder qw(decode_sereal);
|
||||
use Sereal::Encoder qw(encode_sereal);
|
||||
use Test::More;
|
||||
use Data::Dumper;
|
||||
|
||||
$Data::Dumper::Indent = 0;
|
||||
$Data::Dumper::Sortkeys = 1;
|
||||
|
||||
sub slurp {
|
||||
my $n = shift;
|
||||
open (my $fh, "<", $n) or die "can't open $n: $!\n";
|
||||
local $/ = undef;
|
||||
my $d = <$fh>;
|
||||
return $d;
|
||||
}
|
||||
|
||||
# Some parts of the Sereal specification (like aliases) are deliberately not
|
||||
# implemented in Go. As a result a set of tests checking omitted functionality
|
||||
# will fail. To reduce the number of false negatives here we list the names of all
|
||||
# tests that are supposed to fail and skip them later.
|
||||
#
|
||||
# Multiple original tests share the same name making the following list not
|
||||
# 100% reliable and accurate. To mitigate it we also maintain a counter holding
|
||||
# a total number of tests to be skipped.
|
||||
#
|
||||
my $skip_total = 216;
|
||||
my %skip = map { $_ => 1 } (
|
||||
'array ref to aliases blessed array',
|
||||
'array ref to aliases complex hash',
|
||||
'array ref to aliases deep nest',
|
||||
'array ref to aliases double ref to foo',
|
||||
'array ref to aliases empty hash',
|
||||
'array ref to aliases empty string',
|
||||
'array ref to aliases eng-ref',
|
||||
'array ref to aliases escaped string',
|
||||
'array ref to aliases float',
|
||||
'array ref to aliases integer: -1',
|
||||
'array ref to aliases integer: -127',
|
||||
'array ref to aliases integer: -128',
|
||||
'array ref to aliases integer: -129',
|
||||
'array ref to aliases integer: -2147483647',
|
||||
'array ref to aliases integer: -2147483648',
|
||||
'array ref to aliases integer: -2147483649',
|
||||
'array ref to aliases integer: -9223372036854775807',
|
||||
'array ref to aliases integer: -9223372036854775808',
|
||||
'array ref to aliases integer: 0',
|
||||
'array ref to aliases integer: 0e0',
|
||||
'array ref to aliases integer: 0e1',
|
||||
'array ref to aliases integer: 1',
|
||||
'array ref to aliases integer: 11285066962739960988',
|
||||
'array ref to aliases integer: 126',
|
||||
'array ref to aliases integer: 127',
|
||||
'array ref to aliases integer: 128',
|
||||
'array ref to aliases integer: 129',
|
||||
'array ref to aliases integer: 18446744073709551614',
|
||||
'array ref to aliases integer: 18446744073709551615',
|
||||
'array ref to aliases integer: 1e3',
|
||||
'array ref to aliases integer: 2147483646',
|
||||
'array ref to aliases integer: 2147483647',
|
||||
'array ref to aliases integer: 2147483648',
|
||||
'array ref to aliases integer: 2147483649',
|
||||
'array ref to aliases integer: 254',
|
||||
'array ref to aliases integer: 255',
|
||||
'array ref to aliases integer: 256',
|
||||
'array ref to aliases integer: 257',
|
||||
'array ref to aliases integer: 3735928559',
|
||||
'array ref to aliases integer: 42',
|
||||
'array ref to aliases integer: 4294967295',
|
||||
'array ref to aliases integer: 9223372036854775806',
|
||||
'array ref to aliases integer: 9223372036854775807',
|
||||
'array ref to aliases integer: 9223372036854775808',
|
||||
'array ref to aliases largeish int',
|
||||
'array ref to aliases largeish negative int',
|
||||
'array ref to aliases long ascii string',
|
||||
'array ref to aliases long latin1 string',
|
||||
'array ref to aliases long utf8 string with only ascii',
|
||||
'array ref to aliases long utf8 string with only latin1 subset',
|
||||
'array ref to aliases long utf8 string',
|
||||
'array ref to aliases more complex',
|
||||
'array ref to aliases more escapes',
|
||||
'array ref to aliases negative big num',
|
||||
'array ref to aliases negative float',
|
||||
'array ref to aliases negative small float',
|
||||
'array ref to aliases float 0.1',
|
||||
'array ref to aliases float 0.2',
|
||||
'array ref to aliases float 0.3',
|
||||
'array ref to aliases float 0.4',
|
||||
'array ref to aliases float 0.5',
|
||||
'array ref to aliases float 0.6',
|
||||
'array ref to aliases float 0.7',
|
||||
'array ref to aliases float 0.8',
|
||||
'array ref to aliases float 0.9',
|
||||
'array ref to aliases small float 0.41',
|
||||
'array ref to aliases negative small float -0.13',
|
||||
'array ref to aliases complex hash with float',
|
||||
'array ref to aliases more complex with float',
|
||||
'array ref to aliases nested simple',
|
||||
'array ref to aliases positive big num',
|
||||
'array ref to aliases quote keys',
|
||||
'array ref to aliases ref to foo',
|
||||
'array ref to aliases ref to undef',
|
||||
'array ref to aliases reffy hash',
|
||||
'array ref to aliases refy array',
|
||||
'array ref to aliases regexp with inline modifiers',
|
||||
'array ref to aliases regexp with modifiers',
|
||||
'array ref to aliases short ascii string',
|
||||
'array ref to aliases short latin1 string',
|
||||
'array ref to aliases short utf8 string',
|
||||
'array ref to aliases simple array',
|
||||
'array ref to aliases simple hash',
|
||||
'array ref to aliases simple regexp',
|
||||
'array ref to aliases small float',
|
||||
'array ref to aliases small int',
|
||||
'array ref to aliases small negative int',
|
||||
'array ref to aliases undef value',
|
||||
'array ref to aliases undef',
|
||||
'array ref to aliases utf8 string',
|
||||
'array ref to aliases var strings',
|
||||
'array ref to aliases troublesome num/strs',
|
||||
"array ref to aliases troublesome num/strs ' 1 '",
|
||||
"array ref to aliases troublesome num/strs '0.0'",
|
||||
"array ref to aliases troublesome num/strs '00000.0000'",
|
||||
"array ref to aliases troublesome num/strs '0.0.0.0'",
|
||||
"array ref to aliases troublesome num/strs '.0'",
|
||||
"array ref to aliases troublesome num/strs ' .0'",
|
||||
"array ref to aliases troublesome num/strs ' 22'",
|
||||
"array ref to aliases troublesome num/strs '01'",
|
||||
"array ref to aliases troublesome num/strs '01.1'",
|
||||
"array ref to aliases troublesome num/strs ' 0 '",
|
||||
"array ref to aliases troublesome num/strs '.0'",
|
||||
"array ref to aliases troublesome num/strs '0.001'",
|
||||
"array ref to aliases troublesome num/strs '.1'",
|
||||
"array ref to aliases troublesome num/strs ' .1'",
|
||||
"array ref to aliases troublesome num/strs '.2'",
|
||||
"array ref to aliases troublesome num/strs '00'",
|
||||
"array ref to aliases troublesome num/strs '.00'",
|
||||
"array ref to aliases troublesome num/strs '0 but true'",
|
||||
"array ref to aliases troublesome num/strs '0E0'",
|
||||
"array ref to aliases largeish negative int -302001",
|
||||
"array ref to aliases largeish negative int -1234567",
|
||||
"array ref to aliases largeish negative int -12345678",
|
||||
"array ref to aliases long ascii string 'a' x 9999",
|
||||
"array ref to aliases long ascii string 'a' x 10000",
|
||||
"array ref to aliases long ascii string 'a' x 10001",
|
||||
"array ref to aliases long ascii string 'a' x 1023",
|
||||
"array ref to aliases long ascii string 'a' x 1024",
|
||||
"array ref to aliases long ascii string 'a' x 1025",
|
||||
"array ref to aliases long ascii string 'a' x 8191",
|
||||
"array ref to aliases long ascii string 'a' x 8192",
|
||||
"array ref to aliases long ascii string 'a' x 8193",
|
||||
"array ref to aliases long ascii string 'ab' x 9999",
|
||||
"array ref to aliases long ascii string 'ab' x 10000",
|
||||
"array ref to aliases long ascii string 'ab' x 10001",
|
||||
"array ref to aliases long ascii string 'ab' x 1023",
|
||||
"array ref to aliases long ascii string 'ab' x 1024",
|
||||
"array ref to aliases long ascii string 'ab' x 1025",
|
||||
"array ref to aliases long ascii string 'ab' x 8191",
|
||||
"array ref to aliases long ascii string 'ab' x 8192",
|
||||
"array ref to aliases long ascii string 'ab' x 8193",
|
||||
"array ref to aliases long ascii string 'abc' x 9999",
|
||||
"array ref to aliases long ascii string 'abc' x 10000",
|
||||
"array ref to aliases long ascii string 'abc' x 10001",
|
||||
"array ref to aliases long ascii string 'abc' x 1023",
|
||||
"array ref to aliases long ascii string 'abc' x 1024",
|
||||
"array ref to aliases long ascii string 'abc' x 1025",
|
||||
"array ref to aliases long ascii string 'abc' x 8191",
|
||||
"array ref to aliases long ascii string 'abc' x 8192",
|
||||
"array ref to aliases long ascii string 'abc' x 8193",
|
||||
"array ref to aliases long ascii string 'abcd' x 9999",
|
||||
"array ref to aliases long ascii string 'abcd' x 10000",
|
||||
"array ref to aliases long ascii string 'abcd' x 10001",
|
||||
"array ref to aliases long ascii string 'abcd' x 1023",
|
||||
"array ref to aliases long ascii string 'abcd' x 1024",
|
||||
"array ref to aliases long ascii string 'abcd' x 1025",
|
||||
"array ref to aliases long ascii string 'abcd' x 8191",
|
||||
"array ref to aliases long ascii string 'abcd' x 8192",
|
||||
"array ref to aliases long ascii string 'abcd' x 8193",
|
||||
'array ref to scalar refs to same largeish negative int -302001',
|
||||
'array ref to scalar refs to same largeish negative int -1234567',
|
||||
'array ref to scalar refs to same largeish negative int -12345678',
|
||||
'array ref to scalar refs to same float',
|
||||
'array ref to scalar refs to same integer: -1',
|
||||
'array ref to scalar refs to same integer: -127',
|
||||
'array ref to scalar refs to same integer: -128',
|
||||
'array ref to scalar refs to same integer: -129',
|
||||
'array ref to scalar refs to same integer: -2147483647',
|
||||
'array ref to scalar refs to same integer: -2147483648',
|
||||
'array ref to scalar refs to same integer: -2147483649',
|
||||
'array ref to scalar refs to same integer: -9223372036854775807',
|
||||
'array ref to scalar refs to same integer: -9223372036854775808',
|
||||
'array ref to scalar refs to same integer: 0',
|
||||
'array ref to scalar refs to same integer: 1',
|
||||
'array ref to scalar refs to same integer: 11285066962739960988',
|
||||
'array ref to scalar refs to same integer: 126',
|
||||
'array ref to scalar refs to same integer: 127',
|
||||
'array ref to scalar refs to same integer: 128',
|
||||
'array ref to scalar refs to same integer: 129',
|
||||
'array ref to scalar refs to same integer: 18446744073709551614',
|
||||
'array ref to scalar refs to same integer: 18446744073709551615',
|
||||
'array ref to scalar refs to same integer: 2147483646',
|
||||
'array ref to scalar refs to same integer: 2147483647',
|
||||
'array ref to scalar refs to same integer: 2147483648',
|
||||
'array ref to scalar refs to same integer: 2147483649',
|
||||
'array ref to scalar refs to same integer: 254',
|
||||
'array ref to scalar refs to same integer: 255',
|
||||
'array ref to scalar refs to same integer: 256',
|
||||
'array ref to scalar refs to same integer: 257',
|
||||
'array ref to scalar refs to same integer: 3735928559',
|
||||
'array ref to scalar refs to same integer: 42',
|
||||
'array ref to scalar refs to same integer: 4294967295',
|
||||
'array ref to scalar refs to same integer: 9223372036854775806',
|
||||
'array ref to scalar refs to same integer: 9223372036854775807',
|
||||
'array ref to scalar refs to same integer: 9223372036854775808',
|
||||
'array ref to scalar refs to same integer: 0e0',
|
||||
'array ref to scalar refs to same integer: 0e1',
|
||||
'array ref to scalar refs to same integer: 1e3',
|
||||
'array ref to scalar refs to same largeish int',
|
||||
'array ref to scalar refs to same largeish negative int',
|
||||
'array ref to scalar refs to same negative big num',
|
||||
'array ref to scalar refs to same float 0.1',
|
||||
'array ref to scalar refs to same float 0.2',
|
||||
'array ref to scalar refs to same float 0.3',
|
||||
'array ref to scalar refs to same float 0.4',
|
||||
'array ref to scalar refs to same float 0.5',
|
||||
'array ref to scalar refs to same float 0.6',
|
||||
'array ref to scalar refs to same float 0.7',
|
||||
'array ref to scalar refs to same float 0.8',
|
||||
'array ref to scalar refs to same float 0.9',
|
||||
'array ref to scalar refs to same small float 0.41',
|
||||
'array ref to scalar refs to same negative small float -0.13',
|
||||
'array ref to scalar refs to same negative float',
|
||||
'array ref to scalar refs to same negative small float',
|
||||
'array ref to scalar refs to same positive big num',
|
||||
'array ref to scalar refs to same small float',
|
||||
'array ref to scalar refs to same small int',
|
||||
'array ref to scalar refs to same small negative int',
|
||||
'repeated substructure (REFP): scalar ref',
|
||||
'scalar cross',
|
||||
'weak scalar cross',
|
||||
'weak thing copy (requires PAD)',
|
||||
'BlessedArrayCheck 1',
|
||||
'BlessedArrayCheck 2',
|
||||
'Scalar Cross Blessed Array',
|
||||
);
|
||||
|
||||
my $skipped = 0;
|
||||
|
||||
for my $n (glob("test_dir/test_data_?????")) {
|
||||
|
||||
(my $test_number = $n) =~ s/.*test_data_0*//;
|
||||
|
||||
chomp(my $name = slurp(sprintf("test_dir/test_name_%05d", $test_number)));
|
||||
|
||||
if ($skip{$name}) {
|
||||
SKIP: { skip "$name ($n) not implemented", 1; };
|
||||
$skipped++;
|
||||
next;
|
||||
}
|
||||
|
||||
if (not -f "$n-go.out") {
|
||||
fail($name);
|
||||
diag("No Go test output for $n");
|
||||
# die;
|
||||
next;
|
||||
}
|
||||
|
||||
my $testdata = slurp($n);
|
||||
my $p;
|
||||
eval {
|
||||
$p = decode_sereal($testdata);
|
||||
1;
|
||||
} or do {
|
||||
my $err = $@;
|
||||
fail($name);
|
||||
diag("Failed unpacking perl $n: $err");
|
||||
next;
|
||||
};
|
||||
|
||||
$testdata = slurp("$n-go.out");
|
||||
my $g;
|
||||
|
||||
eval {
|
||||
$g = decode_sereal($testdata);
|
||||
1;
|
||||
} or do {
|
||||
my $err = $@;
|
||||
fail($name);
|
||||
diag("Failed unpacking go $n: $err");
|
||||
next;
|
||||
};
|
||||
|
||||
my $dg = Dumper($g);
|
||||
my $dp = Dumper($p);
|
||||
|
||||
if (!ok($dg eq $dp, $name)) {
|
||||
diag("$n\nGot: $dg\nExp: $dp");
|
||||
# die;
|
||||
next;
|
||||
}
|
||||
}
|
||||
|
||||
is($skipped, $skip_total, "skipped expected number of tests");
|
||||
|
||||
{
|
||||
foreach my $class ("time.Time", "github.com/Sereal/Sereal/Go/sereal.StructWithTime", "_/home/travis/build/Sereal/Sereal/Go/sereal.StructWithTime") {
|
||||
no strict 'refs';
|
||||
*{"${class}::THAW"} = sub { my ( $pkg, $srl, $val ) = @_; bless \$val, $pkg };
|
||||
*{"${class}::FREEZE"} = sub { ${$_[0]} };
|
||||
}
|
||||
|
||||
for my $n (glob("test_freeze/*-go.out")) {
|
||||
my $testdata = slurp($n);
|
||||
my ( $name ) = ( $n =~ m{([^/]+)-go\.out$} );
|
||||
my $g;
|
||||
|
||||
eval {
|
||||
$g = decode_sereal($testdata);
|
||||
1;
|
||||
} or do {
|
||||
my $err = $@;
|
||||
fail($name);
|
||||
diag("Failed unpacking go $n: $err");
|
||||
next;
|
||||
};
|
||||
|
||||
( my $perl = $n ) =~ s{-go\.out$}{-perl.out};
|
||||
|
||||
open my $fh, ">", $perl or die "Can't open $perl for writing: $!";
|
||||
print $fh encode_sereal($g, { freeze_callbacks => 1 }) or die "print($perl): $!";
|
||||
close $fh or die "close($perl): $!";
|
||||
|
||||
pass($name);
|
||||
}
|
||||
}
|
||||
|
||||
done_testing();
|
||||
|
|
@ -0,0 +1,76 @@
|
|||
package sereal
|
||||
|
||||
import (
|
||||
"encoding/hex"
|
||||
"reflect"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestV2(t *testing.T) {
|
||||
|
||||
h := map[string]interface{}{
|
||||
"type": "web",
|
||||
"count": []interface{}{12, 14, 12},
|
||||
}
|
||||
|
||||
b := map[string]interface{}{
|
||||
"hello": "world",
|
||||
"foo": []interface{}{
|
||||
map[string]interface{}{"bar": 1},
|
||||
map[string]interface{}{"bar": 2},
|
||||
},
|
||||
}
|
||||
|
||||
e := NewEncoderV2()
|
||||
|
||||
enc, err := e.MarshalWithHeader(h, b)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
d := NewDecoder()
|
||||
|
||||
var dh map[string]interface{}
|
||||
var db map[string]interface{}
|
||||
|
||||
err = d.UnmarshalHeaderBody(enc, &dh, &db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(h, dh) {
|
||||
t.Errorf("failed to decode header:\ngot : %#v\nexpect: %#v\n", dh, h)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(b, db) {
|
||||
t.Errorf("failed to decode body:\ngot : %#v\nexpect: %#v\n", db, b)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestV2Compat(t *testing.T) {
|
||||
|
||||
// ugly because the hash values aren't typed as strings but as SHORT_BINARY
|
||||
h := map[string]interface{}{"type": []uint8{0x77, 0x65, 0x62}, "counts": []interface{}{12, 14, 12}}
|
||||
b := map[string]interface{}{"hello": []uint8{0x77, 0x6f, 0x72, 0x6c, 0x64}, "foo": []interface{}{map[string]interface{}{"bar": 1}, map[string]interface{}{"bar": 2}}}
|
||||
|
||||
enc, _ := hex.DecodeString("3d73726c0216015264747970656377656266636f756e7473430c0e0c526568656c6c6f65776f726c6463666f6f42516362617201512f1402")
|
||||
|
||||
var dh map[string]interface{}
|
||||
var db map[string]interface{}
|
||||
|
||||
d := NewDecoder()
|
||||
|
||||
err := d.UnmarshalHeaderBody(enc, &dh, &db)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(h, dh) {
|
||||
t.Errorf("failed to decode header:\ngot : %#v\nexpect: %#v\n", dh, h)
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(b, db) {
|
||||
t.Errorf("failed to decode body:\ngot : %#v\nexpect: %#v\n", db, b)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,69 @@
|
|||
package sereal
|
||||
|
||||
import (
|
||||
"compress/zlib"
|
||||
"math"
|
||||
)
|
||||
|
||||
// ZlibCompressor compresses a Sereal document using the zlib format.
|
||||
type ZlibCompressor struct {
|
||||
Level int // compression level, set to ZlibDefaultCompression by default
|
||||
}
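// Usage sketch (assumed to mirror the Snappy compressor above; not covered by
// the tests in this commit):
//
//	e := &Encoder{}
//	e.Compression = ZlibCompressor{Level: ZlibBestCompression}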
|
||||
|
||||
// Zlib constants
|
||||
const (
|
||||
ZlibBestSpeed = zlib.BestSpeed
|
||||
ZlibBestCompression = zlib.BestCompression
|
||||
ZlibDefaultCompression = zlib.DefaultCompression
|
||||
)
|
||||
|
||||
func (c ZlibCompressor) compress(buf []byte) ([]byte, error) {
|
||||
// Prepend a compressed block with its length, i.e.:
|
||||
//
|
||||
// <Varint><Varint><Zlib Blob>
|
||||
// 1st varint indicates the length of the uncompressed document,
|
||||
// 2nd varint indicates the length of the compressed document.
|
||||
//
|
||||
// XXX This is a naive implementation; better to rework it as described in the spec:
|
||||
// https://github.com/Sereal/Sereal/blob/master/sereal_spec.pod#encoding-the-length-of-compressed-documents
|
||||
|
||||
if c.Level == 0 {
|
||||
c.Level = ZlibDefaultCompression
|
||||
}
|
||||
|
||||
tail, err := zlibEncode(buf, c.Level)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var head []byte
|
||||
head = varint(head, uint(len(buf)))
|
||||
head = varint(head, uint(len(tail)))
|
||||
|
||||
return append(head, tail...), nil
|
||||
}
|
||||
|
||||
func (c ZlibCompressor) decompress(buf []byte) ([]byte, error) {
|
||||
// Read the claimed length of the uncompressed document
|
||||
uln, usz, err := varintdecode(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
buf = buf[usz:]
|
||||
|
||||
// Read the claimed length of the compressed document
|
||||
cln, csz, err := varintdecode(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if cln < 0 || cln > math.MaxInt32 || csz+cln > len(buf) {
|
||||
return nil, ErrCorrupt{errBadOffset}
|
||||
}
|
||||
|
||||
buf = buf[csz : csz+cln]
|
||||
|
||||
// XXX Perhaps check if len(buf) == cln
|
||||
|
||||
return zlibDecode(uln, buf)
|
||||
}
|
|
@ -0,0 +1,51 @@
|
|||
// +build clibs
|
||||
|
||||
package sereal
|
||||
|
||||
/*
|
||||
#cgo LDFLAGS: -lz
|
||||
|
||||
#include <zlib.h>
|
||||
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func zlibEncode(buf []byte, level int) ([]byte, error) {
|
||||
|
||||
dLen := C.compressBound(C.uLong(len(buf)))
|
||||
|
||||
dst := make([]byte, dLen)
|
||||
|
||||
err := C.compress2((*C.Bytef)(unsafe.Pointer(&dst[0])), (*C.uLongf)(unsafe.Pointer(&dLen)),
|
||||
(*C.Bytef)(unsafe.Pointer(&buf[0])), C.uLong(len(buf)),
|
||||
C.int(level))
|
||||
|
||||
// compression failed :(
|
||||
if err != C.Z_OK {
|
||||
return nil, errors.New("zlib error")
|
||||
}
|
||||
|
||||
return dst[:dLen], nil
|
||||
}
|
||||
|
||||
func zlibDecode(uln int, buf []byte) ([]byte, error) {
|
||||
|
||||
dst := make([]byte, uln)
|
||||
|
||||
dLen := uln
|
||||
|
||||
err := C.uncompress((*C.Bytef)(unsafe.Pointer(&dst[0])), (*C.uLongf)(unsafe.Pointer(&dLen)),
|
||||
(*C.Bytef)(unsafe.Pointer(&buf[0])), C.uLong(len(buf)))
|
||||
|
||||
// decompression failed :(
|
||||
if err != C.Z_OK || uln != dLen {
|
||||
return nil, errors.New("zlib error")
|
||||
}
|
||||
|
||||
return dst, nil
|
||||
}
|
|
@ -0,0 +1,62 @@
|
|||
// +build !clibs
|
||||
|
||||
package sereal
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/zlib"
|
||||
"sync"
|
||||
)
|
||||
|
||||
var zlibWriterPools = make(map[int]*sync.Pool)
|
||||
|
||||
func init() {
|
||||
// -1 => 9
|
||||
for i := zlib.DefaultCompression; i <= zlib.BestCompression; i++ {
|
||||
i := i // capture the loop variable so each pool's New closure gets its own compression level
zlibWriterPools[i] = &sync.Pool{
|
||||
New: func() interface{} {
|
||||
zw, _ := zlib.NewWriterLevel(nil, i)
|
||||
return zw
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
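// zlibEncode compresses buf at the given zlib level, reusing pooled
// zlib.Writer instances (one pool per level) to avoid re-allocating writers
// on every call.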
func zlibEncode(buf []byte, level int) ([]byte, error) {
|
||||
|
||||
var comp bytes.Buffer
|
||||
|
||||
zw := zlibWriterPools[level].Get().(*zlib.Writer)
|
||||
defer zlibWriterPools[level].Put(zw)
|
||||
zw.Reset(&comp)
|
||||
|
||||
_, err := zw.Write(buf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
err = zw.Close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return comp.Bytes(), nil
|
||||
}
|
||||
|
||||
func zlibDecode(uln int, buf []byte) ([]byte, error) {
|
||||
zr, err := zlib.NewReader(bytes.NewReader(buf))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer zr.Close()
|
||||
|
||||
dec := bytes.NewBuffer(make([]byte, 0, uln))
|
||||
_, err = dec.ReadFrom(zr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// XXX Perhaps check if the number of read bytes == uln
|
||||
return dec.Bytes(), nil
|
||||
}
|
|
@ -0,0 +1,37 @@
|
|||
# r1459
|
||||
--
|
||||
import "github.com/Xe/Tetra/1459"
|
||||
|
||||
Package r1459 implements a base structure to scrape out and utilize an RFC 1459
|
||||
frame in high level Go code.
|
||||
|
||||
## Usage
|
||||
|
||||
#### type RawLine
|
||||
|
||||
```go
|
||||
type RawLine struct {
|
||||
Source string `json:"source"`
|
||||
Verb string `json:"verb"`
|
||||
Args []string `json:"args"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
Raw string `json:"-"` // Deprecated
|
||||
}
|
||||
```
|
||||
|
||||
IRC line
|
||||
|
||||
#### func NewRawLine
|
||||
|
||||
```go
|
||||
func NewRawLine(input string) (line *RawLine)
|
||||
```
|
||||
Create a new line and split out an RFC 1459 frame into a RawLine. This will not
|
||||
return an error if it fails. TODO: fix this.
|
||||
|
||||
#### func (*RawLine) String
|
||||
|
||||
```go
|
||||
func (r *RawLine) String() (res string)
|
||||
```
|
||||
String returns the serialized form of a RawLine as an RFC 1459 frame.
|
|
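As a quick orientation (not part of the generated docs above), here is a minimal usage sketch based on the parser and tests added in this commit; it parses a frame and serializes it back:

```go
package main

import (
	"fmt"

	r1459 "github.com/Xe/Tetra/1459"
)

func main() {
	// Parse an RFC 1459 frame into source, verb and arguments.
	line := r1459.NewRawLine(":Xena!oper@yolo-swag.com PRIVMSG #niichan :Why hello there")
	fmt.Println(line.Source) // Xena!oper@yolo-swag.com
	fmt.Println(line.Verb)   // PRIVMSG
	fmt.Println(line.Args)   // [#niichan Why hello there]

	// String re-serializes the structure as an RFC 1459 frame.
	fmt.Println(line.String())
}
```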
@ -0,0 +1,78 @@
|
|||
// Package r1459 implements a base structure to scrape out and utilize an RFC 1459
|
||||
// frame in high level Go code.
|
||||
package r1459
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// RawLine represents an IRC line.
|
||||
type RawLine struct {
|
||||
Source string `json:"source"`
|
||||
Verb string `json:"verb"`
|
||||
Args []string `json:"args"`
|
||||
Tags map[string]string `json:"tags"`
|
||||
Raw string `json:"-"` // Deprecated
|
||||
}
|
||||
|
||||
// NewRawLine creates a new line and splits out an RFC 1459 frame into a RawLine. This will
|
||||
// not return an error if it fails.
|
||||
func NewRawLine(input string) (line *RawLine) {
|
||||
line = &RawLine{
|
||||
Raw: input,
|
||||
}
|
||||
|
||||
split := strings.Split(input, " ")
|
||||
|
||||
// A leading ':' marks a source prefix; record it and shift to the verb.
if split[0][0] == ':' {
|
||||
line.Source = split[0][1:]
|
||||
line.Verb = split[1]
|
||||
split = split[2:]
|
||||
} else {
|
||||
line.Source = ""
|
||||
line.Verb = split[0]
|
||||
split = split[1:]
|
||||
}
|
||||
|
||||
// Everything after the first " :" is the trailing parameter and may contain spaces.
argstring := strings.Join(split, " ")
|
||||
extparam := strings.Split(argstring, " :")
|
||||
|
||||
if len(extparam) > 1 {
|
||||
ext := strings.Join(extparam[1:], " :")
|
||||
args := strings.Split(extparam[0], " ")
|
||||
|
||||
line.Args = append(args, ext)
|
||||
} else {
|
||||
line.Args = split
|
||||
}
|
||||
|
||||
if len(line.Args) == 0 {
|
||||
line.Args = []string{""}
|
||||
} else if line.Args[0][0] == ':' {
|
||||
line.Args[0] = strings.TrimPrefix(line.Args[0], ":")
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// String returns the serialized form of a RawLine as an RFC 1459 frame.
|
||||
func (r *RawLine) String() (res string) {
|
||||
if r.Source != "" {
|
||||
res = res + fmt.Sprintf(":%s ", r.Source)
|
||||
}
|
||||
|
||||
res = res + r.Verb
|
||||
|
||||
for i, arg := range r.Args {
|
||||
res = res + " "
|
||||
|
||||
if i == len(r.Args)-1 { // Make the last part of the line an extparam
|
||||
res = res + ":"
|
||||
}
|
||||
|
||||
res = res + arg
|
||||
}
|
||||
|
||||
return
|
||||
}
|
|
@ -0,0 +1,79 @@
|
|||
package r1459
|
||||
|
||||
import (
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestBaseParse(t *testing.T) {
|
||||
line := "FOO"
|
||||
|
||||
lineStruct := NewRawLine(line)
|
||||
|
||||
if lineStruct.Verb != "FOO" {
|
||||
t.Fatalf("Line verb expected to be FOO, it is %s", lineStruct.Verb)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPRIVMSGParse(t *testing.T) {
|
||||
line := ":Xena!oper@yolo-swag.com PRIVMSG #niichan :Why hello there"
|
||||
|
||||
lineStruct := NewRawLine(line)
|
||||
|
||||
if lineStruct.Verb != "PRIVMSG" {
|
||||
t.Fatalf("Line verb expected to be PRIVMSG, it is %s", lineStruct.Verb)
|
||||
}
|
||||
|
||||
if lineStruct.Source != "Xena!oper@yolo-swag.com" {
|
||||
t.Fatalf("Line source expected to be PRIVMSG, it is %s", lineStruct.Source)
|
||||
}
|
||||
|
||||
if len(lineStruct.Args) != 2 {
|
||||
t.Fatalf("Line arg count expected to be 2, it is %s", len(lineStruct.Args))
|
||||
}
|
||||
|
||||
if lineStruct.Args[0] != "#niichan" {
|
||||
t.Fatalf("Line arg 0 expected to be #niichan, it is %s", lineStruct.Args[0])
|
||||
}
|
||||
|
||||
if lineStruct.Args[1] != "Why hello there" {
|
||||
t.Fatalf("Line arg 1 expected to be 'Why hello there', it is %s", lineStruct.Args[1])
|
||||
}
|
||||
}
|
||||
|
||||
// This test case has previously been known to crash this library.
|
||||
func TestPreviouslyBreakingLine(t *testing.T) {
|
||||
line := ":649AAAABS AWAY"
|
||||
|
||||
lineStruct := NewRawLine(line)
|
||||
|
||||
if lineStruct.Source != "649AAAABS" {
|
||||
t.Fatalf("Line source expected to be 649AAAABS, it is %s", lineStruct.Source)
|
||||
}
|
||||
|
||||
if lineStruct.Verb != "AWAY" {
|
||||
t.Fatalf("Line verb expected to be AWAY, it is %s", lineStruct.Verb)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringWithSource(t *testing.T) {
|
||||
line := &RawLine{
|
||||
Source: "Foo",
|
||||
Verb: "BAR",
|
||||
Args: []string{"#bar", "fozbroz arg with spaces"},
|
||||
}
|
||||
|
||||
if res := line.String(); res != ":Foo BAR #bar :fozbroz arg with spaces" {
|
||||
t.Fatalf("Expected :Foo BAR #bar :fozbroz arg with spaces and got %s", res)
|
||||
}
|
||||
}
|
||||
|
||||
func TestStringWithoutSource(t *testing.T) {
|
||||
line := &RawLine{
|
||||
Verb: "BAR",
|
||||
Args: []string{"#bar", "fozbroz arg with spaces"},
|
||||
}
|
||||
|
||||
if res := line.String(); res != "BAR #bar :fozbroz arg with spaces" {
|
||||
t.Fatalf("Expected BAR #bar :fozbroz arg with spaces and got %s", res)
|
||||
}
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) [2016] [Asdine El Hrychy]
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,350 @@
|
|||
# Storm
|
||||
|
||||
[![Build Status](https://travis-ci.org/asdine/storm.svg)](https://travis-ci.org/asdine/storm)
|
||||
[![GoDoc](https://godoc.org/github.com/asdine/storm?status.svg)](https://godoc.org/github.com/asdine/storm)
|
||||
[![Go Report Card](https://goreportcard.com/badge/github.com/asdine/storm)](https://goreportcard.com/report/github.com/asdine/storm)
|
||||
[![Coverage](http://gocover.io/_badge/github.com/asdine/storm)](http://gocover.io/github.com/asdine/storm)
|
||||
|
||||
Storm is a simple and powerful ORM for [BoltDB](https://github.com/boltdb/bolt). The goal of this project is to provide a simple way to save any object in BoltDB and to retrieve it easily.
|
||||
|
||||
## Getting Started
|
||||
|
||||
```bash
|
||||
go get -u github.com/asdine/storm
|
||||
```
|
||||
|
||||
## Import Storm
|
||||
|
||||
```go
|
||||
import "github.com/asdine/storm"
|
||||
```
|
||||
|
||||
## Open a database
|
||||
|
||||
Quick way of opening a database
|
||||
```go
|
||||
db, err := storm.Open("my.db")
|
||||
|
||||
defer db.Close()
|
||||
```
|
||||
|
||||
`Open` can receive multiple options to customize the way it behaves. See [Options](#options) below.
|
||||
|
||||
## Simple ORM
|
||||
|
||||
### Declare your structures
|
||||
|
||||
```go
|
||||
type User struct {
|
||||
ID int // primary key
|
||||
Group string `storm:"index"` // this field will be indexed
|
||||
Email string `storm:"unique"` // this field will be indexed with a unique constraint
|
||||
Name string // this field will not be indexed
|
||||
Age int `storm:"index"`
|
||||
}
|
||||
```
|
||||
|
||||
The primary key can be of any type as long as it is not a zero value. Storm will search for the tag `id`; if it is not present, Storm will fall back to a field named `ID`.
|
||||
|
||||
```go
|
||||
type User struct {
|
||||
ThePrimaryKey string `storm:"id"`// primary key
|
||||
Group string `storm:"index"` // this field will be indexed
|
||||
Email string `storm:"unique"` // this field will be indexed with a unique constraint
|
||||
Name string // this field will not be indexed
|
||||
}
|
||||
```
|
||||
|
||||
Storm handles tags in nested structures with the `inline` tag
|
||||
|
||||
```go
|
||||
type Base struct {
|
||||
Ident bson.ObjectId `storm:"id"`
|
||||
}
|
||||
|
||||
type User struct {
|
||||
Base `storm:"inline"`
|
||||
Group string `storm:"index"`
|
||||
Email string `storm:"unique"`
|
||||
Name string
|
||||
CreatedAt time.Time `storm:"index"`
|
||||
}
|
||||
```
|
||||
|
||||
### Save your object
|
||||
|
||||
```go
|
||||
user := User{
|
||||
ID: 10,
|
||||
Group: "staff",
|
||||
Email: "john@provider.com",
|
||||
Name: "John",
|
||||
Age: 21,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
err := db.Save(&user)
|
||||
// err == nil
|
||||
|
||||
user.ID++
|
||||
err = db.Save(&user)
|
||||
// err == "already exists"
|
||||
```
|
||||
|
||||
That's it.
|
||||
|
||||
`Save` creates or updates all the required indexes and buckets, checks the unique constraints and saves the object to the store.
|
||||
|
||||
### Fetch your object
|
||||
|
||||
Only indexed fields can be used to find a record.
|
||||
|
||||
```go
|
||||
var user User
|
||||
err := db.One("Email", "john@provider.com", &user)
|
||||
// err == nil
|
||||
|
||||
err = db.One("Name", "John", &user)
|
||||
// err == "not found"
|
||||
```
|
||||
|
||||
### Fetch multiple objects
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.Find("Group", "staff", &users)
|
||||
```
|
||||
|
||||
### Fetch all objects
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.All(&users)
|
||||
```
|
||||
|
||||
### Fetch all objects sorted by index
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.AllByIndex("CreatedAt", &users)
|
||||
```
|
||||
|
||||
### Fetch a range of objects
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.Range("Age", 10, 21, &users)
|
||||
```
|
||||
|
||||
### Skip and Limit
|
||||
|
||||
```go
|
||||
var users []User
|
||||
err := db.Find("Group", "staff", &users, storm.Skip(10))
|
||||
err = db.Find("Group", "staff", &users, storm.Limit(10))
|
||||
err = db.Find("Group", "staff", &users, storm.Limit(10), storm.Skip(10))
|
||||
|
||||
err = db.All(&users, storm.Limit(10), storm.Skip(10))
|
||||
err = db.AllByIndex("CreatedAt", &users, storm.Limit(10), storm.Skip(10))
|
||||
err = db.Range("Age", 10, 21, &users, storm.Limit(10), storm.Skip(10))
|
||||
```
|
||||
|
||||
### Remove an object
|
||||
|
||||
```go
|
||||
err := db.Remove(&user)
|
||||
```
|
||||
|
||||
### Initialize buckets and indexes before saving an object
|
||||
|
||||
```go
|
||||
err := db.Init(&User{})
|
||||
```
|
||||
|
||||
Useful when starting your application.
|
||||
|
||||
### Transactions
|
||||
|
||||
```go
|
||||
tx, err := db.Begin(true)
|
||||
|
||||
accountA.Amount -= 100
|
||||
accountB.Amount += 100
|
||||
|
||||
err = tx.Save(accountA)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
err = tx.Save(accountB)
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
return err
|
||||
}
|
||||
|
||||
tx.Commit()
|
||||
```
|
||||
### Options
|
||||
|
||||
Storm options are functions that can be passed when constructing your Storm instance. You can pass any number of them.
|
||||
|
||||
#### BoltOptions
|
||||
|
||||
By default, Storm opens a database with the mode `0600` and a timeout of one second.
|
||||
You can change this behavior by using `BoltOptions`
|
||||
|
||||
```go
|
||||
db, err := storm.Open("my.db", storm.BoltOptions(0600, &bolt.Options{Timeout: 1 * time.Second}))
|
||||
```
|
||||
|
||||
#### EncodeDecoder
|
||||
|
||||
To store the data in BoltDB, Storm encodes it in GOB by default. If you wish to change this behavior you can pass a codec that implements [`codec.EncodeDecoder`](https://godoc.org/github.com/asdine/storm/codec#EncodeDecoder) via the [`storm.Codec`](https://godoc.org/github.com/asdine/storm#Codec) option:
|
||||
|
||||
```go
|
||||
db := storm.Open("my.db", storm.Codec(myCodec))
|
||||
```
|
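For illustration only, `myCodec` could be a custom type like the hypothetical XML-based codec sketched below; it just has to provide the `Encode`/`Decode` pair required by `codec.EncodeDecoder` (that interface is part of this commit):

```go
package xmlcodec

import "encoding/xml"

// Codec is a hypothetical EncodeDecoder that stores entities as XML.
var Codec = new(xmlCodec)

type xmlCodec int

func (c xmlCodec) Encode(v interface{}) ([]byte, error) {
	return xml.Marshal(v)
}

func (c xmlCodec) Decode(b []byte, v interface{}) error {
	return xml.Unmarshal(b, v)
}
```

It would then be passed as `storm.Codec(xmlcodec.Codec)`, exactly like the built-in codecs listed below.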
||||
|
||||
##### Provided Codecs
|
||||
|
||||
You can easily implement your own `EncodeDecoder`, but Storm comes with built-in support for [GOB](https://godoc.org/github.com/asdine/storm/codec/gob) (default), [JSON](https://godoc.org/github.com/asdine/storm/codec/json), [Sereal](https://godoc.org/github.com/asdine/storm/codec/sereal) and [Protocol Buffers](https://godoc.org/github.com/asdine/storm/codec/protobuf)
|
||||
|
||||
These can be used by importing the relevant package and using that codec to configure Storm. The example below shows all four (without proper error handling):
|
||||
|
||||
```go
|
||||
import (
|
||||
"github.com/asdine/storm"
|
||||
"github.com/asdine/storm/codec/gob"
|
||||
"github.com/asdine/storm/codec/json"
|
||||
"github.com/asdine/storm/codec/sereal"
|
||||
"github.com/asdine/storm/codec/protobuf"
|
||||
)
|
||||
|
||||
var gobDb, _ = storm.Open("gob.db", storm.Codec(gob.Codec))
|
||||
var jsonDb, _ = storm.Open("json.db", storm.Codec(json.Codec))
|
||||
var serealDb, _ = storm.Open("sereal.db", storm.Codec(sereal.Codec))
|
||||
var protobufDb, _ = storm.Open("protobuf.db", storm.Codec(protobuf.Codec))
|
||||
```
|
||||
|
||||
#### Auto Increment
|
||||
|
||||
Storm can auto-increment integer IDs so you don't have to worry about setting them when saving your objects.
|
||||
|
||||
```go
|
||||
db := storm.Open("my.db", storm.AutoIncrement())
|
||||
```
|
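A minimal sketch of the effect, reusing the `User` struct declared earlier in this README (the email value is just example data): with `AutoIncrement()`, an integer ID left at its zero value is assigned automatically on `Save`.

```go
db, _ := storm.Open("my.db", storm.AutoIncrement())
defer db.Close()

// No ID is set here; Storm picks the next integer ID when the object is saved.
user := User{Group: "staff", Email: "jane@provider.com", Name: "Jane", Age: 30}
if err := db.Save(&user); err != nil {
	// handle the error
}
```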
||||
|
||||
## Nodes and nested buckets
|
||||
|
||||
Storm takes advantage of BoltDB's nested buckets feature by using `storm.Node`.
|
||||
A `storm.Node` is the underlying object used by `storm.DB` to manipulate a bucket.
|
||||
To create a nested bucket and use the same API as `storm.DB`, you can use the `DB.From` method.
|
||||
|
||||
```go
|
||||
repo := db.From("repo")
|
||||
|
||||
err := repo.Save(&Issue{
|
||||
Title: "I want more features",
|
||||
Author: user.ID,
|
||||
})
|
||||
|
||||
err = repo.Save(newRelease("0.10"))
|
||||
|
||||
var issues []Issue
|
||||
err = repo.Find("Author", user.ID, &issues)
|
||||
|
||||
var release Release
|
||||
err = repo.One("Tag", "0.10", &release)
|
||||
```
|
||||
|
||||
You can also chain the nodes to create a hierarchy
|
||||
|
||||
```go
|
||||
chars := db.From("characters")
|
||||
heroes := chars.From("heroes")
|
||||
enemies := chars.From("enemies")
|
||||
|
||||
items := db.From("items")
|
||||
potions := items.From("consumables").From("medicine").From("potions")
|
||||
```
|
||||
You can even pass the entire hierarchy as arguments to `From`:
|
||||
|
||||
```go
|
||||
privateNotes := db.From("notes", "private")
|
||||
workNotes := db.From("notes", "work")
|
||||
```
|
||||
|
||||
## Simple Key/Value store
|
||||
|
||||
Storm can be used as a simple, robust, key/value store that can store anything.
|
||||
The key and the value can be of any type as long as the key is not a zero value.
|
||||
|
||||
Saving data:
|
||||
```go
|
||||
db.Set("logs", time.Now(), "I'm eating my breakfast man")
|
||||
db.Set("sessions", bson.NewObjectId(), &someUser)
|
||||
db.Set("weird storage", "754-3010", map[string]interface{}{
|
||||
"hair": "blonde",
|
||||
"likes": []string{"cheese", "star wars"},
|
||||
})
|
||||
```
|
||||
|
||||
Fetching data:
|
||||
```go
|
||||
user := User{}
|
||||
db.Get("sessions", someObjectId, &user)
|
||||
|
||||
var details map[string]interface{}
|
||||
db.Get("weird storage", "754-3010", &details)
|
||||
|
||||
db.Get("sessions", someObjectId, &details)
|
||||
```
|
||||
|
||||
Deleting data:
|
||||
```go
|
||||
db.Delete("sessions", someObjectId)
|
||||
db.Delete("weird storage", "754-3010")
|
||||
```
|
||||
|
||||
## BoltDB
|
||||
|
||||
BoltDB is still easily accessible and can be used as usual
|
||||
|
||||
```go
|
||||
db.Bolt.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte("my bucket"))
|
||||
val := bucket.Get([]byte("any id"))
|
||||
fmt.Println(string(val))
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
A transaction can also be passed to Storm.
|
||||
|
||||
```go
|
||||
db.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
...
|
||||
dbx := db.WithTransaction(tx)
|
||||
err = dbx.Save(&user)
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
## TODO
|
||||
|
||||
- Search
|
||||
- Reverse order
|
||||
- More indexes
|
||||
|
||||
## License
|
||||
|
||||
MIT
|
||||
|
||||
## Author
|
||||
|
||||
**Asdine El Hrychy**
|
||||
|
||||
- [Twitter](https://twitter.com/asdine_)
|
||||
- [Github](https://github.com/asdine)
|
|
@ -0,0 +1,180 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/asdine/storm/index"
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// AllByIndex gets all the records of a bucket that are indexed in the specified index
|
||||
func (n *Node) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
|
||||
if fieldName == "" {
|
||||
return n.All(to, options...)
|
||||
}
|
||||
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if ref.Kind() != reflect.Ptr || reflect.Indirect(ref).Kind() != reflect.Slice {
|
||||
return ErrSlicePtrNeeded
|
||||
}
|
||||
|
||||
typ := reflect.Indirect(ref).Type().Elem()
|
||||
|
||||
if typ.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
}
|
||||
|
||||
newElem := reflect.New(typ)
|
||||
|
||||
info, err := extract(newElem.Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if info.ID.Field.Name() == fieldName {
|
||||
return n.All(to, options...)
|
||||
}
|
||||
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.allByIndex(n.tx, fieldName, info, &ref, opts)
|
||||
}
|
||||
|
||||
return n.s.Bolt.View(func(tx *bolt.Tx) error {
|
||||
return n.allByIndex(tx, fieldName, info, &ref, opts)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) allByIndex(tx *bolt.Tx, fieldName string, info *modelInfo, ref *reflect.Value, opts *index.Options) error {
|
||||
bucket := n.GetBucket(tx, info.Name)
|
||||
if bucket == nil {
|
||||
return fmt.Errorf("bucket %s not found", info.Name)
|
||||
}
|
||||
|
||||
idxInfo, ok := info.Indexes[fieldName]
|
||||
if !ok {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
idx, err := getIndex(bucket, idxInfo.Type, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
list, err := idx.AllRecords(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
results := reflect.MakeSlice(reflect.Indirect(*ref).Type(), len(list), len(list))
|
||||
|
||||
for i := range list {
|
||||
raw := bucket.Get(list[i])
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
err = n.s.Codec.Decode(raw, results.Index(i).Addr().Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
reflect.Indirect(*ref).Set(results)
|
||||
return nil
|
||||
}
|
||||
|
||||
// All gets all the records of a bucket
|
||||
func (n *Node) All(to interface{}, options ...func(*index.Options)) error {
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if ref.Kind() != reflect.Ptr || reflect.Indirect(ref).Kind() != reflect.Slice {
|
||||
return ErrSlicePtrNeeded
|
||||
}
|
||||
|
||||
rtyp := reflect.Indirect(ref).Type().Elem()
|
||||
typ := rtyp
|
||||
|
||||
if rtyp.Kind() == reflect.Ptr {
|
||||
typ = typ.Elem()
|
||||
}
|
||||
|
||||
newElem := reflect.New(typ)
|
||||
|
||||
info, err := extract(newElem.Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.all(n.tx, info, &ref, rtyp, typ, opts)
|
||||
}
|
||||
|
||||
return n.s.Bolt.View(func(tx *bolt.Tx) error {
|
||||
return n.all(tx, info, &ref, rtyp, typ, opts)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) all(tx *bolt.Tx, info *modelInfo, ref *reflect.Value, rtyp, typ reflect.Type, opts *index.Options) error {
|
||||
bucket := n.GetBucket(tx, info.Name)
|
||||
if bucket == nil {
|
||||
return fmt.Errorf("bucket %s not found", info.Name)
|
||||
}
|
||||
|
||||
results := reflect.MakeSlice(reflect.Indirect(*ref).Type(), 0, 0)
|
||||
c := bucket.Cursor()
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if opts != nil && opts.Skip > 0 {
|
||||
opts.Skip--
|
||||
continue
|
||||
}
|
||||
|
||||
if opts != nil && opts.Limit == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if opts != nil && opts.Limit > 0 {
|
||||
opts.Limit--
|
||||
}
|
||||
|
||||
newElem := reflect.New(typ)
|
||||
err := n.s.Codec.Decode(v, newElem.Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if rtyp.Kind() == reflect.Ptr {
|
||||
results = reflect.Append(results, newElem)
|
||||
} else {
|
||||
results = reflect.Append(results, reflect.Indirect(newElem))
|
||||
}
|
||||
}
|
||||
|
||||
reflect.Indirect(*ref).Set(results)
|
||||
return nil
|
||||
}
|
||||
|
||||
// AllByIndex gets all the records of a bucket that are indexed in the specified index
|
||||
func (s *DB) AllByIndex(fieldName string, to interface{}, options ...func(*index.Options)) error {
|
||||
return s.root.AllByIndex(fieldName, to, options...)
|
||||
}
|
||||
|
||||
// All gets all the records of a bucket
|
||||
func (s *DB) All(to interface{}, options ...func(*index.Options)) error {
|
||||
return s.root.All(to, options...)
|
||||
}
|
|
@ -0,0 +1,177 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestAllByIndex(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
w := User{Name: "John", ID: i + 1, Slug: fmt.Sprintf("John%d", i+1), DateOfBirth: time.Now().Add(-time.Duration(i*10) * time.Minute)}
|
||||
err := db.Save(&w)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
err := db.AllByIndex("", nil)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrSlicePtrNeeded, err)
|
||||
|
||||
var users []User
|
||||
|
||||
err = db.AllByIndex("DateOfBirth", &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 100)
|
||||
assert.Equal(t, 100, users[0].ID)
|
||||
assert.Equal(t, 1, users[99].ID)
|
||||
|
||||
err = db.AllByIndex("Name", &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 100)
|
||||
assert.Equal(t, 1, users[0].ID)
|
||||
assert.Equal(t, 100, users[99].ID)
|
||||
|
||||
y := UniqueNameUser{Name: "Jake", ID: 200}
|
||||
err = db.Save(&y)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var y2 []UniqueNameUser
|
||||
err = db.AllByIndex("ID", &y2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, y2, 1)
|
||||
|
||||
n := NestedID{}
|
||||
n.ID = "100"
|
||||
n.Name = "John"
|
||||
|
||||
err = db.Save(&n)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var n2 []NestedID
|
||||
err = db.AllByIndex("ID", &n2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, n2, 1)
|
||||
|
||||
err = db.AllByIndex("Name", &users, Limit(10))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 10)
|
||||
assert.Equal(t, 1, users[0].ID)
|
||||
assert.Equal(t, 10, users[9].ID)
|
||||
|
||||
err = db.AllByIndex("Name", &users, Limit(200))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 100)
|
||||
assert.Equal(t, 1, users[0].ID)
|
||||
assert.Equal(t, 100, users[99].ID)
|
||||
|
||||
err = db.AllByIndex("Name", &users, Limit(-10))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 100)
|
||||
assert.Equal(t, 1, users[0].ID)
|
||||
assert.Equal(t, 100, users[99].ID)
|
||||
|
||||
err = db.AllByIndex("Name", &users, Skip(200))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 0)
|
||||
|
||||
err = db.AllByIndex("Name", &users, Skip(-10))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 100)
|
||||
assert.Equal(t, 1, users[0].ID)
|
||||
assert.Equal(t, 100, users[99].ID)
|
||||
|
||||
err = db.AllByIndex("ID", &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 100)
|
||||
assert.Equal(t, 1, users[0].ID)
|
||||
assert.Equal(t, 100, users[99].ID)
|
||||
|
||||
err = db.AllByIndex("ID", &users, Limit(10))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 10)
|
||||
assert.Equal(t, 1, users[0].ID)
|
||||
assert.Equal(t, 10, users[9].ID)
|
||||
|
||||
err = db.AllByIndex("ID", &users, Skip(10))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 90)
|
||||
assert.Equal(t, 11, users[0].ID)
|
||||
assert.Equal(t, 100, users[89].ID)
|
||||
|
||||
err = db.AllByIndex("Name", &users, Limit(10), Skip(10))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 10)
|
||||
assert.Equal(t, 11, users[0].ID)
|
||||
assert.Equal(t, 20, users[9].ID)
|
||||
}
|
||||
|
||||
func TestAll(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
w := User{Name: "John", ID: i + 1, Slug: fmt.Sprintf("John%d", i+1), DateOfBirth: time.Now().Add(-time.Duration(i*10) * time.Minute)}
|
||||
err := db.Save(&w)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
var users []User
|
||||
|
||||
err := db.All(&users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 100)
|
||||
assert.Equal(t, 1, users[0].ID)
|
||||
assert.Equal(t, 100, users[99].ID)
|
||||
|
||||
var users2 []*User
|
||||
|
||||
err = db.All(&users2)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users2, 100)
|
||||
assert.Equal(t, 1, users2[0].ID)
|
||||
assert.Equal(t, 100, users2[99].ID)
|
||||
|
||||
var unknowns []UserWithNoID
|
||||
err = db.All(&unknowns)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNoID, err)
|
||||
|
||||
err = db.Save(&NestedID{
|
||||
ToEmbed: ToEmbed{ID: "id1"},
|
||||
Name: "John",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.Save(&NestedID{
|
||||
ToEmbed: ToEmbed{ID: "id2"},
|
||||
Name: "Mike",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.Save(&NestedID{
|
||||
ToEmbed: ToEmbed{ID: "id3"},
|
||||
Name: "Steve",
|
||||
})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var nested []NestedID
|
||||
err = db.All(&nested)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, nested, 3)
|
||||
|
||||
err = db.All(&users, Limit(10), Skip(10))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 10)
|
||||
assert.Equal(t, 11, users[0].ID)
|
||||
assert.Equal(t, 20, users[9].ID)
|
||||
}
|
|
@ -0,0 +1,47 @@
|
|||
package storm
|
||||
|
||||
import "github.com/boltdb/bolt"
|
||||
|
||||
// CreateBucketIfNotExists creates the bucket below the current node if it doesn't
|
||||
// already exist.
|
||||
func (n *Node) CreateBucketIfNotExists(tx *bolt.Tx, bucket string) (*bolt.Bucket, error) {
|
||||
var b *bolt.Bucket
|
||||
var err error
|
||||
|
||||
bucketNames := append(n.rootBucket, bucket)
|
||||
|
||||
for _, bucketName := range bucketNames {
|
||||
if b != nil {
|
||||
if b, err = b.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
} else {
|
||||
if b, err = tx.CreateBucketIfNotExists([]byte(bucketName)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// GetBucket returns the given bucket below the current node.
|
||||
func (n *Node) GetBucket(tx *bolt.Tx, children ...string) *bolt.Bucket {
|
||||
var b *bolt.Bucket
|
||||
|
||||
bucketNames := append(n.rootBucket, children...)
|
||||
for _, bucketName := range bucketNames {
|
||||
if b != nil {
|
||||
if b = b.Bucket([]byte(bucketName)); b == nil {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
if b = tx.Bucket([]byte(bucketName)); b == nil {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return b
|
||||
}
|
|
@ -0,0 +1,73 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestBucket(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
defer db.Close()
|
||||
|
||||
// Read tx
|
||||
readTx, err := db.Bolt.Begin(false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.Nil(t, db.root.GetBucket(readTx, "none"))
|
||||
|
||||
b, err := db.root.CreateBucketIfNotExists(readTx, "new")
|
||||
|
||||
// Cannot create buckets in a read transaction
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, b)
|
||||
|
||||
// Read transactions in Bolt need a rollback and not a commit
|
||||
readTx.Rollback()
|
||||
|
||||
// End read tx
|
||||
|
||||
// Write tx
|
||||
writeTx, err := db.Bolt.Begin(true)
|
||||
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.Nil(t, db.root.GetBucket(writeTx, "none"))
|
||||
|
||||
b, err = db.root.CreateBucketIfNotExists(writeTx, "new")
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, b)
|
||||
|
||||
n2 := db.From("a", "b")
|
||||
b, err = n2.CreateBucketIfNotExists(writeTx, "c")
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, b)
|
||||
|
||||
writeTx.Commit()
|
||||
|
||||
// End write tx
|
||||
|
||||
// Read tx
|
||||
readTx, err = db.Bolt.Begin(false)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assert.NotNil(t, db.root.GetBucket(readTx, "new"))
|
||||
assert.Nil(t, db.root.GetBucket(readTx, "c"))
|
||||
assert.NotNil(t, n2.GetBucket(readTx, "c"))
|
||||
|
||||
readTx.Rollback()
|
||||
// End read tx
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
package storm
|
||||
|
||||
// Close the database
|
||||
func (s *DB) Close() error {
|
||||
return s.Bolt.Close()
|
||||
}
|
|
@ -0,0 +1,6 @@
|
|||
package storm
|
||||
|
||||
import "github.com/asdine/storm/codec/gob"
|
||||
|
||||
// Defaults to gob
|
||||
var defaultCodec = gob.Codec
|
|
@ -0,0 +1,9 @@
|
|||
// Package codec contains sub-packages with different codecs that can be used
|
||||
// to encode and decode entities in Storm.
|
||||
package codec
|
||||
|
||||
// EncodeDecoder represents a codec used to encode and decode entities.
|
||||
type EncodeDecoder interface {
|
||||
Encode(v interface{}) ([]byte, error)
|
||||
Decode(b []byte, v interface{}) error
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
package codec_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/asdine/storm"
|
||||
"github.com/asdine/storm/codec/gob"
|
||||
"github.com/asdine/storm/codec/json"
|
||||
"github.com/asdine/storm/codec/protobuf"
|
||||
"github.com/asdine/storm/codec/sereal"
|
||||
)
|
||||
|
||||
func Example() {
|
||||
// The examples below show how to set up all the codecs shipped with Storm.
|
||||
// Proper error handling left out to make it simple.
|
||||
var gobDb, _ = storm.Open("gob.db", storm.Codec(gob.Codec))
|
||||
var jsonDb, _ = storm.Open("json.db", storm.Codec(json.Codec))
|
||||
var serealDb, _ = storm.Open("sereal.db", storm.Codec(sereal.Codec))
|
||||
var protobufDb, _ = storm.Open("protobuf.db", storm.Codec(protobuf.Codec))
|
||||
|
||||
fmt.Printf("%T\n", gobDb.Codec)
|
||||
fmt.Printf("%T\n", jsonDb.Codec)
|
||||
fmt.Printf("%T\n", serealDb.Codec)
|
||||
fmt.Printf("%T\n", protobufDb.Codec)
|
||||
|
||||
// Output:
|
||||
// *gob.gobCodec
|
||||
// *json.jsonCodec
|
||||
// *sereal.serealCodec
|
||||
// *protobuf.protobufCodec
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
package gob
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
)
|
||||
|
||||
// Codec serializing objects using the gob package.
|
||||
// See https://golang.org/pkg/encoding/gob/
|
||||
var Codec = new(gobCodec)
|
||||
|
||||
type gobCodec int
|
||||
|
||||
func (c gobCodec) Encode(v interface{}) ([]byte, error) {
|
||||
var b bytes.Buffer
|
||||
enc := gob.NewEncoder(&b)
|
||||
err := enc.Encode(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func (c gobCodec) Decode(b []byte, v interface{}) error {
|
||||
r := bytes.NewReader(b)
|
||||
dec := gob.NewDecoder(r)
|
||||
return dec.Decode(v)
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
package gob
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/asdine/storm/codec/internal"
|
||||
)
|
||||
|
||||
func TestGob(t *testing.T) {
|
||||
internal.RoundtripTester(t, Codec)
|
||||
}
|
|
@ -0,0 +1,44 @@
|
|||
package internal
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/asdine/storm/codec"
|
||||
)
|
||||
|
||||
type testStruct struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
// RoundtripTester is a test helper to test an EncodeDecoder
|
||||
func RoundtripTester(t *testing.T, c codec.EncodeDecoder, vals ...interface{}) {
|
||||
var val, to interface{}
|
||||
if len(vals) > 0 {
|
||||
if len(vals) != 2 {
|
||||
panic("Wrong number of vals, expected 2")
|
||||
}
|
||||
val = vals[0]
|
||||
to = vals[1]
|
||||
} else {
|
||||
val = &testStruct{Name: "test"}
|
||||
to = &testStruct{}
|
||||
}
|
||||
|
||||
encoded, err := c.Encode(val)
|
||||
if err != nil {
|
||||
t.Fatal("Encode error:", err)
|
||||
}
|
||||
err = c.Decode(encoded, to)
|
||||
if err != nil {
|
||||
t.Fatal("Decode error:", err)
|
||||
}
|
||||
if !reflect.DeepEqual(val, to) {
|
||||
t.Fatalf("Roundtrip codec mismatch, expected\n%#v\ngot\n%#v", val, to)
|
||||
}
|
||||
}
|
||||
|
||||
func init() {
|
||||
gob.Register(&testStruct{})
|
||||
}
|
|
@ -0,0 +1,18 @@
|
|||
package json
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
)
|
||||
|
||||
// Codec that encodes to and decodes from JSON.
|
||||
var Codec = new(jsonCodec)
|
||||
|
||||
type jsonCodec int
|
||||
|
||||
func (j jsonCodec) Encode(v interface{}) ([]byte, error) {
|
||||
return json.Marshal(v)
|
||||
}
|
||||
|
||||
func (j jsonCodec) Decode(b []byte, v interface{}) error {
|
||||
return json.Unmarshal(b, v)
|
||||
}
|
|
@ -0,0 +1,11 @@
|
|||
package json
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/asdine/storm/codec/internal"
|
||||
)
|
||||
|
||||
func TestJSON(t *testing.T) {
|
||||
internal.RoundtripTester(t, Codec)
|
||||
}
|
|
@ -0,0 +1,31 @@
|
|||
package protobuf
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
)
|
||||
|
||||
// More details on Protocol Buffers https://github.com/golang/protobuf
|
||||
var (
|
||||
Codec = new(protobufCodec)
|
||||
errNotProtocolBufferMessage = errors.New("value isn't a Protocol Buffers Message")
|
||||
)
|
||||
|
||||
type protobufCodec int
|
||||
|
||||
func (c protobufCodec) Encode(v interface{}) ([]byte, error) {
|
||||
message, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
return nil, errNotProtocolBufferMessage
|
||||
}
|
||||
return proto.Marshal(message)
|
||||
}
|
||||
|
||||
func (c protobufCodec) Decode(b []byte, v interface{}) error {
|
||||
message, ok := v.(proto.Message)
|
||||
if !ok {
|
||||
return errNotProtocolBufferMessage
|
||||
}
|
||||
return proto.Unmarshal(b, message)
|
||||
}
|
|
@ -0,0 +1,19 @@
|
|||
package protobuf
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/asdine/storm/codec/internal"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestProtobuf(t *testing.T) {
|
||||
u1 := SimpleUser{
|
||||
Id: proto.Uint64(1),
|
||||
Name: proto.String("John"),
|
||||
}
|
||||
u2 := SimpleUser{}
|
||||
internal.RoundtripTester(t, Codec, &u1, &u2)
|
||||
assert.True(t, u1.GetId() == u2.GetId())
|
||||
}
|
|
@ -0,0 +1,77 @@
|
|||
// Code generated by protoc-gen-go.
|
||||
// source: simple_user.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package protobuf is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
simple_user.proto
|
||||
|
||||
It has these top-level messages:
|
||||
SimpleUser
|
||||
*/
|
||||
package protobuf
|
||||
|
||||
import proto "github.com/golang/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
const _ = proto.ProtoPackageIsVersion1
|
||||
|
||||
type SimpleUser struct {
|
||||
Id *uint64 `protobuf:"varint,1,req,name=id" json:"id,omitempty"`
|
||||
Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"`
|
||||
Age *int32 `protobuf:"varint,3,opt,name=age,def=0" json:"age,omitempty"`
|
||||
XXX_unrecognized []byte `json:"-"`
|
||||
}
|
||||
|
||||
func (m *SimpleUser) Reset() { *m = SimpleUser{} }
|
||||
func (m *SimpleUser) String() string { return proto.CompactTextString(m) }
|
||||
func (*SimpleUser) ProtoMessage() {}
|
||||
func (*SimpleUser) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
|
||||
|
||||
const Default_SimpleUser_Age int32 = 0
|
||||
|
||||
func (m *SimpleUser) GetId() uint64 {
|
||||
if m != nil && m.Id != nil {
|
||||
return *m.Id
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
func (m *SimpleUser) GetName() string {
|
||||
if m != nil && m.Name != nil {
|
||||
return *m.Name
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (m *SimpleUser) GetAge() int32 {
|
||||
if m != nil && m.Age != nil {
|
||||
return *m.Age
|
||||
}
|
||||
return Default_SimpleUser_Age
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*SimpleUser)(nil), "protobuf.SimpleUser")
|
||||
}
|
||||
|
||||
var fileDescriptor0 = []byte{
|
||||
// 100 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0x12, 0x2c, 0xce, 0xcc, 0x2d,
|
||||
0xc8, 0x49, 0x8d, 0x2f, 0x2d, 0x4e, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x00,
|
||||
0x53, 0x49, 0xa5, 0x69, 0x4a, 0x66, 0x5c, 0x5c, 0xc1, 0x60, 0xe9, 0x50, 0xa0, 0xac, 0x10, 0x17,
|
||||
0x17, 0x53, 0x66, 0x8a, 0x04, 0xa3, 0x02, 0x93, 0x06, 0x8b, 0x10, 0x0f, 0x17, 0x4b, 0x5e, 0x62,
|
||||
0x6e, 0xaa, 0x04, 0x13, 0x90, 0xc7, 0x29, 0xc4, 0xc7, 0xc5, 0x9c, 0x98, 0x9e, 0x2a, 0xc1, 0xac,
|
||||
0xc0, 0xa8, 0xc1, 0x6a, 0xc5, 0x68, 0x00, 0x08, 0x00, 0x00, 0xff, 0xff, 0x64, 0x21, 0x66, 0x4d,
|
||||
0x55, 0x00, 0x00, 0x00,
|
||||
}
|
|
@ -0,0 +1,7 @@
|
|||
package protobuf;
|
||||
|
||||
message SimpleUser {
|
||||
required uint64 id = 1;
|
||||
required string name = 2;
|
||||
optional int32 age = 3 [default=0];
|
||||
}
|
|
@ -0,0 +1,20 @@
|
|||
package sereal
|
||||
|
||||
import (
|
||||
"github.com/Sereal/Sereal/Go/sereal"
|
||||
)
|
||||
|
||||
// The Sereal codec has some interesting features, one of them being
|
||||
// serialization of object references, including circular references.
|
||||
// See https://github.com/Sereal/Sereal
|
||||
var Codec = new(serealCodec)
|
||||
|
||||
type serealCodec int
|
||||
|
||||
func (c serealCodec) Encode(v interface{}) ([]byte, error) {
|
||||
return sereal.Marshal(v)
|
||||
}
|
||||
|
||||
func (c serealCodec) Decode(b []byte, v interface{}) error {
|
||||
return sereal.Unmarshal(b, v)
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
package sereal
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/asdine/storm/codec/internal"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
type SerealUser struct {
|
||||
Name string
|
||||
Self *SerealUser
|
||||
}
|
||||
|
||||
func TestSereal(t *testing.T) {
|
||||
u1 := &SerealUser{Name: "Sereal"}
|
||||
u1.Self = u1 // cyclic ref
|
||||
u2 := &SerealUser{}
|
||||
internal.RoundtripTester(t, Codec, &u1, &u2)
|
||||
assert.True(t, u2 == u2.Self)
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"reflect"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCodec(t *testing.T) {
|
||||
u1 := &SimpleUser{Name: "John"}
|
||||
encoded, err := defaultCodec.Encode(u1)
|
||||
assert.Nil(t, err)
|
||||
u2 := &SimpleUser{}
|
||||
err = defaultCodec.Decode(encoded, u2)
|
||||
assert.Nil(t, err)
|
||||
if !reflect.DeepEqual(u1, u2) {
|
||||
t.Fatal("Codec mismatch")
|
||||
}
|
||||
}
|
|
@ -0,0 +1,54 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/fatih/structs"
|
||||
)
|
||||
|
||||
// Count counts all the records of a bucket
|
||||
func (n *Node) Count(data interface{}) (int, error) {
|
||||
if !structs.IsStruct(data) {
|
||||
return 0, ErrBadType
|
||||
}
|
||||
|
||||
info, err := extract(data)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
var count int
|
||||
if n.tx != nil {
|
||||
err = n.count(n.tx, info, &count)
|
||||
return count, err
|
||||
}
|
||||
|
||||
err = n.s.Bolt.View(func(tx *bolt.Tx) error {
|
||||
return n.count(tx, info, &count)
|
||||
})
|
||||
return count, err
|
||||
}
|
||||
|
||||
func (n *Node) count(tx *bolt.Tx, info *modelInfo, count *int) error {
|
||||
bucket := n.GetBucket(tx, info.Name)
|
||||
if bucket == nil {
|
||||
return fmt.Errorf("bucket %s not found", info.Name)
|
||||
}
|
||||
|
||||
*count = 0
|
||||
c := bucket.Cursor()
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
if v == nil {
|
||||
continue
|
||||
}
|
||||
(*count)++
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Count counts all the records of a bucket
|
||||
func (s *DB) Count(data interface{}) (int, error) {
|
||||
return s.root.Count(data)
|
||||
}
|
|
@ -0,0 +1,53 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestCount(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
w := User{Name: "John", ID: i + 1, Slug: fmt.Sprintf("John%d", i+1), DateOfBirth: time.Now().Add(-time.Duration(i*10) * time.Minute)}
|
||||
err := db.Save(&w)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
count, err := db.Count(&User{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 100, count)
|
||||
|
||||
w := User{Name: "John", ID: 101, Slug: fmt.Sprintf("John%d", 101), DateOfBirth: time.Now().Add(-time.Duration(101*10) * time.Minute)}
|
||||
err = db.Save(&w)
|
||||
assert.NoError(t, err)
|
||||
|
||||
count, err = db.Count(&User{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 101, count)
|
||||
|
||||
tx, err := db.Begin(true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
count, err = tx.Count(&User{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 101, count)
|
||||
|
||||
w = User{Name: "John", ID: 102, Slug: fmt.Sprintf("John%d", 102), DateOfBirth: time.Now().Add(-time.Duration(101*10) * time.Minute)}
|
||||
err = tx.Save(&w)
|
||||
assert.NoError(t, err)
|
||||
|
||||
count, err = tx.Count(&User{})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 102, count)
|
||||
|
||||
tx.Commit()
|
||||
}
|
|
@ -0,0 +1,33 @@
|
|||
package storm
|
||||
|
||||
import "github.com/boltdb/bolt"
|
||||
|
||||
// Delete deletes a key from a bucket
|
||||
func (n *Node) Delete(bucketName string, key interface{}) error {
|
||||
id, err := toBytes(key, n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.delete(n.tx, bucketName, id)
|
||||
}
|
||||
|
||||
return n.s.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
return n.delete(tx, bucketName, id)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) delete(tx *bolt.Tx, bucketName string, id []byte) error {
|
||||
bucket := n.GetBucket(tx, bucketName)
|
||||
if bucket == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return bucket.Delete(id)
|
||||
}
|
||||
|
||||
// Delete deletes a key from a bucket
|
||||
func (s *DB) Delete(bucketName string, key interface{}) error {
|
||||
return s.root.Delete(bucketName, key)
|
||||
}
|
|
@ -0,0 +1,27 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDelete(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
err := db.Set("files", "myfile.csv", "a,b,c,d")
|
||||
assert.NoError(t, err)
|
||||
err = db.Delete("files", "myfile.csv")
|
||||
assert.NoError(t, err)
|
||||
err = db.Delete("files", "myfile.csv")
|
||||
assert.NoError(t, err)
|
||||
err = db.Delete("i don't exist", "myfile.csv")
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
err = db.Delete("", nil)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
}
|
|
@ -0,0 +1,28 @@
|
|||
package storm
|
||||
|
||||
import "github.com/boltdb/bolt"
|
||||
|
||||
// Drop a bucket
|
||||
func (n *Node) Drop(bucketName string) error {
|
||||
if n.tx != nil {
|
||||
return n.drop(n.tx, bucketName)
|
||||
}
|
||||
|
||||
return n.s.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
return n.drop(tx, bucketName)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) drop(tx *bolt.Tx, bucketName string) error {
|
||||
bucket := n.GetBucket(tx)
|
||||
if bucket == nil {
|
||||
return tx.DeleteBucket([]byte(bucketName))
|
||||
}
|
||||
|
||||
return bucket.DeleteBucket([]byte(bucketName))
|
||||
}
|
||||
|
||||
// Drop a bucket
|
||||
func (s *DB) Drop(bucketName string) error {
|
||||
return s.root.Drop(bucketName)
|
||||
}
|
|
@ -0,0 +1,45 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestDrop(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
n := db.From("b1", "b2", "b3")
|
||||
err := n.Save(&SimpleUser{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.From("b1").Drop("b2")
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.From("b1").Drop("b2")
|
||||
assert.Error(t, err)
|
||||
|
||||
n.From("b4").Drop("b5")
|
||||
assert.Error(t, err)
|
||||
|
||||
err = db.Drop("b1")
|
||||
assert.NoError(t, err)
|
||||
|
||||
db.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
d := db.WithTransaction(tx)
|
||||
n := d.From("a1")
|
||||
err = n.Save(&SimpleUser{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = d.Drop("a1")
|
||||
assert.NoError(t, err)
|
||||
|
||||
return nil
|
||||
})
|
||||
}
|
|
@ -0,0 +1,45 @@
|
|||
package storm
|
||||
|
||||
import "errors"
|
||||
|
||||
// Errors
|
||||
var (
|
||||
// ErrNoID is returned when no ID field or id tag is found in the struct.
|
||||
ErrNoID = errors.New("missing struct tag id or ID field")
|
||||
|
||||
// ErrZeroID is returned when the ID field is a zero value.
|
||||
ErrZeroID = errors.New("id field must not be a zero value")
|
||||
|
||||
// ErrBadType is returned when a method receives an unexpected value type.
|
||||
ErrBadType = errors.New("provided data must be a struct or a pointer to struct")
|
||||
|
||||
// ErrAlreadyExists is returned when trying to set an existing value on a field that has a unique index.
|
||||
ErrAlreadyExists = errors.New("already exists")
|
||||
|
||||
// ErrNilParam is returned when the specified param is expected to be not nil.
|
||||
ErrNilParam = errors.New("param must not be nil")
|
||||
|
||||
// ErrUnknownTag is returned when an unexpected tag is specified.
|
||||
ErrUnknownTag = errors.New("unknown tag")
|
||||
|
||||
// ErrIdxNotFound is returned when the specified index is not found.
|
||||
ErrIdxNotFound = errors.New("index not found")
|
||||
|
||||
// ErrSlicePtrNeeded is returned when an unexpected value is given, instead of a pointer to slice.
|
||||
ErrSlicePtrNeeded = errors.New("provided target must be a pointer to slice")
|
||||
|
||||
// ErrStructPtrNeeded is returned when an unexpected value is given, instead of a pointer to struct.
|
||||
ErrStructPtrNeeded = errors.New("provided target must be a pointer to struct")
|
||||
|
||||
// ErrPtrNeeded is returned when an unexpected value is given, instead of a pointer.
|
||||
ErrPtrNeeded = errors.New("provided target must be a pointer to a valid variable")
|
||||
|
||||
// ErrNoName is returned when the specified struct has no name.
|
||||
ErrNoName = errors.New("provided target must have a name")
|
||||
|
||||
// ErrNotFound is returned when the specified record is not saved in the bucket.
|
||||
ErrNotFound = errors.New("not found")
|
||||
|
||||
// ErrNotInTransaction is returned when trying to rollback or commit when not in transaction.
|
||||
ErrNotInTransaction = errors.New("not in transaction")
|
||||
)
|
|
@ -0,0 +1,391 @@
|
|||
package storm_test
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/asdine/storm"
|
||||
)
|
||||
|
||||
func ExampleDB_Save() {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
type User struct {
|
||||
ID int `storm:"id"`
|
||||
Group string `storm:"index"`
|
||||
Email string `storm:"unique"`
|
||||
Name string
|
||||
Age int `storm:"index"`
|
||||
CreatedAt time.Time `storm:"index"`
|
||||
}
|
||||
|
||||
// Open takes an optional list of options as the last argument.
|
||||
// AutoIncrement will auto-increment integer IDs without existing values.
|
||||
db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.AutoIncrement())
|
||||
defer db.Close()
|
||||
|
||||
user := User{
|
||||
Group: "staff",
|
||||
Email: "john@provider.com",
|
||||
Name: "John",
|
||||
Age: 21,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
|
||||
err := db.Save(&user)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
user2 := user
|
||||
user2.ID = 0
|
||||
|
||||
// Save will fail because of the unique constraint on Email
|
||||
err = db.Save(&user2)
|
||||
fmt.Println(err)
|
||||
|
||||
// Output:
|
||||
// already exists
|
||||
}
|
||||
|
||||
func ExampleDB_One() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
var user User
|
||||
|
||||
err := db.One("Email", "john@provider.com", &user)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
// One only works for indexed fields.
|
||||
err = db.One("Name", "John", &user)
|
||||
fmt.Println(err)
|
||||
|
||||
// Output:
|
||||
// not found
|
||||
}
|
||||
|
||||
func ExampleDB_Find() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
var users []User
|
||||
err := db.Find("Group", "staff", &users)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("Found", len(users))
|
||||
|
||||
// Output:
|
||||
// Found 3
|
||||
}
|
||||
|
||||
func ExampleDB_All() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
var users []User
|
||||
err := db.All(&users)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("Found", len(users))
|
||||
|
||||
// Output:
|
||||
// Found 3
|
||||
}
|
||||
|
||||
func ExampleDB_AllByIndex() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
var users []User
|
||||
err := db.AllByIndex("CreatedAt", &users)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("Found", len(users))
|
||||
|
||||
// Output:
|
||||
// Found 3
|
||||
}
|
||||
|
||||
func ExampleDB_Range() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
var users []User
|
||||
err := db.Range("Age", 21, 22, &users)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("Found", len(users))
|
||||
|
||||
// Output:
|
||||
// Found 2
|
||||
}
|
||||
|
||||
func ExampleLimit() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
var users []User
|
||||
err := db.All(&users, storm.Limit(2))
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("Found", len(users))
|
||||
|
||||
// Output:
|
||||
// Found 2
|
||||
}
|
||||
|
||||
func ExampleSkip() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
var users []User
|
||||
err := db.All(&users, storm.Skip(1))
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("Found", len(users))
|
||||
|
||||
// Output:
|
||||
// Found 2
|
||||
}
|
||||
|
||||
func ExampleDB_Remove() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
var user User
|
||||
|
||||
err := db.One("ID", 1, &user)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = db.Remove(user)
|
||||
fmt.Println(err)
|
||||
|
||||
// Output:
|
||||
// <nil>
|
||||
}
|
||||
|
||||
func ExampleDB_Begin() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
// both start out with a balance of 10000 cents
|
||||
var account1, account2 Account
|
||||
|
||||
tx, err := db.Begin(true)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = tx.One("ID", 1, &account1)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = tx.One("ID", 2, &account2)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
account1.Amount -= 1000
|
||||
account2.Amount += 1000
|
||||
|
||||
err = tx.Save(account1)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = tx.Save(account2)
|
||||
|
||||
if err != nil {
|
||||
tx.Rollback()
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
tx.Commit()
|
||||
|
||||
var account1Reloaded, account2Reloaded Account
|
||||
|
||||
err = db.One("ID", 1, &account1Reloaded)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = db.One("ID", 2, &account2Reloaded)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println("Amount in account 1:", account1Reloaded.Amount)
|
||||
fmt.Println("Amount in account 2:", account2Reloaded.Amount)
|
||||
|
||||
// Output:
|
||||
// Amount in account 1: 9000
|
||||
// Amount in account 2: 11000
|
||||
}
|
||||
|
||||
func ExampleDB_From() {
|
||||
dir, db := prepareDB()
|
||||
defer os.RemoveAll(dir)
|
||||
defer db.Close()
|
||||
|
||||
// Create some sub buckets to partition the data.
|
||||
privateNotes := db.From("notes", "private")
|
||||
workNotes := db.From("notes", "work")
|
||||
|
||||
err := privateNotes.Save(&Note{ID: "private1", Text: "This is some private text."})
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = workNotes.Save(&Note{ID: "work1", Text: "Work related."})
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var privateNote, workNote, personalNote Note
|
||||
|
||||
err = privateNotes.One("ID", "work1", &workNote)
|
||||
|
||||
// Not found: Wrong bucket.
|
||||
fmt.Println(err)
|
||||
|
||||
err = workNotes.One("ID", "work1", &workNote)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = privateNotes.One("ID", "private1", &privateNote)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println(workNote.Text)
|
||||
fmt.Println(privateNote.Text)
|
||||
|
||||
// These can be nested further if needed:
|
||||
personalNotes := privateNotes.From("personal")
|
||||
err = personalNotes.Save(&Note{ID: "personal1", Text: "This is some very personal text."})
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
err = personalNotes.One("ID", "personal1", &personalNote)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
fmt.Println(personalNote.Text)
|
||||
|
||||
// Output:
|
||||
// not found
|
||||
// Work related.
|
||||
// This is some private text.
|
||||
// This is some very personal text.
|
||||
}
|
||||
|
||||
type User struct {
|
||||
ID int `storm:"id"`
|
||||
Group string `storm:"index"`
|
||||
Email string `storm:"unique"`
|
||||
Name string
|
||||
Age int `storm:"index"`
|
||||
CreatedAt time.Time `storm:"index"`
|
||||
}
|
||||
|
||||
type Account struct {
|
||||
ID int `storm:"id"`
|
||||
Amount int64 // amount in cents
|
||||
}
|
||||
|
||||
type Note struct {
|
||||
ID string `storm:"id"`
|
||||
Text string
|
||||
}
|
||||
|
||||
func prepareDB() (string, *storm.DB) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
db, _ := storm.Open(filepath.Join(dir, "storm.db"), storm.AutoIncrement())
|
||||
|
||||
for i, name := range []string{"John", "Eric", "Dilbert"} {
|
||||
email := strings.ToLower(name + "@provider.com")
|
||||
user := User{
|
||||
Group: "staff",
|
||||
Email: email,
|
||||
Name: name,
|
||||
Age: 21 + i,
|
||||
CreatedAt: time.Now(),
|
||||
}
|
||||
err := db.Save(&user)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
for i := int64(0); i < 10; i++ {
|
||||
account := Account{Amount: 10000}
|
||||
|
||||
err := db.Save(&account)
|
||||
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
||||
return dir, db
|
||||
|
||||
}
|
|
@@ -0,0 +1,166 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/asdine/storm/index"
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/fatih/structs"
|
||||
)
|
||||
|
||||
// Storm tags
|
||||
const (
|
||||
tagID = "id"
|
||||
tagIdx = "index"
|
||||
tagUniqueIdx = "unique"
|
||||
tagInline = "inline"
|
||||
indexPrefix = "__storm_index_"
|
||||
)
|
||||
|
||||
type indexInfo struct {
|
||||
Type string
|
||||
Field *structs.Field
|
||||
}
|
||||
|
||||
// modelInfo gathers all the relevant information about a model
|
||||
type modelInfo struct {
|
||||
Name string
|
||||
Indexes map[string]indexInfo
|
||||
ID identInfo
|
||||
data interface{}
|
||||
}
|
||||
|
||||
func (m *modelInfo) AddIndex(f *structs.Field, indexType string, override bool) {
|
||||
fieldName := f.Name()
|
||||
if _, ok := m.Indexes[fieldName]; !ok || override {
|
||||
m.Indexes[fieldName] = indexInfo{
|
||||
Type: indexType,
|
||||
Field: f,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *modelInfo) AllByType(indexType string) []indexInfo {
|
||||
var idx []indexInfo
|
||||
for k := range m.Indexes {
|
||||
if m.Indexes[k].Type == indexType {
|
||||
idx = append(idx, m.Indexes[k])
|
||||
}
|
||||
}
|
||||
|
||||
return idx
|
||||
}
|
||||
|
||||
func extract(data interface{}, mi ...*modelInfo) (*modelInfo, error) {
|
||||
s := structs.New(data)
|
||||
fields := s.Fields()
|
||||
|
||||
var child bool
|
||||
|
||||
var m *modelInfo
|
||||
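// a non-empty mi means extract was called recursively for an inline (embedded) struct; the root call builds a fresh modelInfo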
if len(mi) > 0 {
|
||||
m = mi[0]
|
||||
child = true
|
||||
} else {
|
||||
m = &modelInfo{}
|
||||
m.Indexes = make(map[string]indexInfo)
|
||||
m.data = data
|
||||
}
|
||||
|
||||
if m.Name == "" {
|
||||
m.Name = s.Name()
|
||||
}
|
||||
|
||||
for _, f := range fields {
|
||||
if !f.IsExported() {
|
||||
continue
|
||||
}
|
||||
|
||||
err := extractField(f, m, child)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
// ID field or tag detected
|
||||
if m.ID.Field != nil {
|
||||
if m.ID.Field.IsZero() {
|
||||
m.ID.IsZero = true
|
||||
} else {
|
||||
m.ID.Value = m.ID.Field.Value()
|
||||
}
|
||||
}
|
||||
|
||||
if child {
|
||||
return m, nil
|
||||
}
|
||||
|
||||
if m.ID.Field == nil {
|
||||
return nil, ErrNoID
|
||||
}
|
||||
|
||||
if m.Name == "" {
|
||||
return nil, ErrNoName
|
||||
}
|
||||
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func extractField(f *structs.Field, m *modelInfo, isChild bool) error {
|
||||
tag := f.Tag("storm")
|
||||
if tag != "" {
|
||||
switch tag {
|
||||
case "id":
|
||||
m.ID.Field = f
|
||||
case tagUniqueIdx, tagIdx:
|
||||
m.AddIndex(f, tag, !isChild)
|
||||
case tagInline:
|
||||
if structs.IsStruct(f.Value()) {
|
||||
_, err := extract(f.Value(), m)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
return ErrUnknownTag
|
||||
}
|
||||
}
|
||||
|
||||
// the field is named ID and no ID field has been detected before
|
||||
if f.Name() == "ID" && m.ID.Field == nil {
|
||||
m.ID.Field = f
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// identInfo caches the most frequently requested information about the ID field
|
||||
type identInfo struct {
|
||||
Field *structs.Field
|
||||
IsZero bool
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
func (i *identInfo) Type() reflect.Type {
|
||||
return reflect.TypeOf(i.Field.Value())
|
||||
}
|
||||
|
||||
func (i *identInfo) IsOfIntegerFamily() bool {
|
||||
return i.Field != nil && i.Field.Kind() >= reflect.Int && i.Field.Kind() <= reflect.Uint64
|
||||
}
|
||||
|
||||
func getIndex(bucket *bolt.Bucket, idxKind string, fieldName string) (index.Index, error) {
|
||||
var idx index.Index
|
||||
var err error
|
||||
|
||||
switch idxKind {
|
||||
case tagUniqueIdx:
|
||||
idx, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
|
||||
case tagIdx:
|
||||
idx, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
|
||||
default:
|
||||
err = ErrIdxNotFound
|
||||
}
|
||||
|
||||
return idx, err
|
||||
}
|
|
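For reference, a minimal sketch of how the storm tags above map onto modelInfo during extract. The Article and Meta types are purely illustrative and are not part of this package:

package model

// Meta is embedded inline, so its exported fields are extracted into the parent modelInfo.
type Meta struct {
	CreatedBy string `storm:"index"` // registered in Indexes with the "index" type
}

// Article shows one field per tag handled by extractField.
type Article struct {
	ID      int    `storm:"id"`     // detected as the ID field (a field simply named ID also works)
	Slug    string `storm:"unique"` // registered in Indexes with the "unique" type
	Author  string `storm:"index"`  // registered in Indexes with the "index" type
	Meta    Meta   `storm:"inline"` // extract recurses into Meta and merges its indexes
	Content string                  // untagged: stored but not indexed
}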
@@ -0,0 +1,57 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestExtractNoTags(t *testing.T) {
|
||||
s := ClassicNoTags{}
|
||||
_, err := extract(&s)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNoID, err)
|
||||
}
|
||||
|
||||
func TestExtractBadTags(t *testing.T) {
|
||||
s := ClassicBadTags{}
|
||||
infos, err := extract(&s)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrUnknownTag, err)
|
||||
assert.Nil(t, infos)
|
||||
}
|
||||
|
||||
func TestExtractUniqueTags(t *testing.T) {
|
||||
s := ClassicUnique{ID: "id"}
|
||||
infos, err := extract(&s)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, infos)
|
||||
assert.NotNil(t, infos.ID)
|
||||
assert.False(t, infos.ID.IsZero)
|
||||
assert.Equal(t, "ClassicUnique", infos.Name)
|
||||
assert.Len(t, infos.AllByType("index"), 0)
|
||||
assert.Len(t, infos.AllByType("unique"), 4)
|
||||
}
|
||||
|
||||
func TestExtractIndexTags(t *testing.T) {
|
||||
s := ClassicIndex{ID: "id"}
|
||||
infos, err := extract(&s)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, infos)
|
||||
assert.NotNil(t, infos.ID)
|
||||
assert.False(t, infos.ID.IsZero)
|
||||
assert.Equal(t, "ClassicIndex", infos.Name)
|
||||
assert.Len(t, infos.AllByType("index"), 5)
|
||||
assert.Len(t, infos.AllByType("unique"), 0)
|
||||
}
|
||||
|
||||
func TestExtractInlineWithIndex(t *testing.T) {
|
||||
s := ClassicInline{ToEmbed: &ToEmbed{ID: "50"}}
|
||||
infos, err := extract(&s)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, infos)
|
||||
assert.NotNil(t, infos.ID)
|
||||
assert.Equal(t, "ClassicInline", infos.Name)
|
||||
assert.Len(t, infos.AllByType("index"), 3)
|
||||
assert.Len(t, infos.AllByType("unique"), 2)
|
||||
}
|
|
@@ -0,0 +1,95 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/asdine/storm/index"
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/fatih/structs"
|
||||
)
|
||||
|
||||
// Find returns one or more records by the specified index
|
||||
func (n *Node) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if ref.Kind() != reflect.Ptr || reflect.Indirect(ref).Kind() != reflect.Slice {
|
||||
return ErrSlicePtrNeeded
|
||||
}
|
||||
|
||||
typ := reflect.Indirect(ref).Type().Elem()
|
||||
newElem := reflect.New(typ)
|
||||
|
||||
d := structs.New(newElem.Interface())
|
||||
bucketName := d.Name()
|
||||
if bucketName == "" {
|
||||
return ErrNoName
|
||||
}
|
||||
|
||||
field, ok := d.FieldOk(fieldName)
|
||||
if !ok {
|
||||
return fmt.Errorf("field %s not found", fieldName)
|
||||
}
|
||||
|
||||
tag := field.Tag("storm")
|
||||
if tag == "" {
|
||||
return fmt.Errorf("index %s not found", fieldName)
|
||||
}
|
||||
|
||||
val, err := toBytes(value, n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.find(n.tx, bucketName, fieldName, tag, &ref, val, opts)
|
||||
}
|
||||
|
||||
return n.s.Bolt.View(func(tx *bolt.Tx) error {
|
||||
return n.find(tx, bucketName, fieldName, tag, &ref, val, opts)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) find(tx *bolt.Tx, bucketName, fieldName, tag string, ref *reflect.Value, val []byte, opts *index.Options) error {
|
||||
bucket := n.GetBucket(tx, bucketName)
|
||||
if bucket == nil {
|
||||
return fmt.Errorf("bucket %s not found", bucketName)
|
||||
}
|
||||
|
||||
idx, err := getIndex(bucket, tag, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
list, err := idx.All(val, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
results := reflect.MakeSlice(reflect.Indirect(*ref).Type(), len(list), len(list))
|
||||
|
||||
for i := range list {
|
||||
raw := bucket.Get(list[i])
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
err = n.s.Codec.Decode(raw, results.Index(i).Addr().Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
reflect.Indirect(*ref).Set(results)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Find returns one or more records by the specified index
|
||||
func (s *DB) Find(fieldName string, value interface{}, to interface{}, options ...func(q *index.Options)) error {
|
||||
return s.root.Find(fieldName, value, to, options...)
|
||||
}
|
|
@@ -0,0 +1,77 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestFind(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
w := User{Name: "John", ID: i + 1, Slug: fmt.Sprintf("John%d", i+1)}
|
||||
err := db.Save(&w)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
err := db.Find("Name", "John", &User{})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrSlicePtrNeeded, err)
|
||||
|
||||
err = db.Find("Name", "John", &[]struct {
|
||||
Name string
|
||||
ID int
|
||||
}{})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNoName, err)
|
||||
|
||||
notTheRightUsers := []UniqueNameUser{}
|
||||
|
||||
err = db.Find("Name", "John", ¬TheRightUsers)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "bucket UniqueNameUser not found")
|
||||
|
||||
users := []User{}
|
||||
|
||||
err = db.Find("Age", "John", &users)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "field Age not found")
|
||||
|
||||
err = db.Find("DateOfBirth", "John", &users)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
err = db.Find("Group", "John", &users)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "index Group not found")
|
||||
|
||||
err = db.Find("Name", "John", &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 100)
|
||||
assert.Equal(t, 1, users[0].ID)
|
||||
assert.Equal(t, 100, users[99].ID)
|
||||
|
||||
users = []User{}
|
||||
err = db.Find("Slug", "John10", &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 1)
|
||||
assert.Equal(t, 10, users[0].ID)
|
||||
|
||||
users = []User{}
|
||||
err = db.Find("Name", nil, &users)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
err = db.Find("Name", "John", &users, Limit(10), Skip(20))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 10)
|
||||
assert.Equal(t, 21, users[0].ID)
|
||||
assert.Equal(t, 30, users[9].ID)
|
||||
}
|
|
@@ -0,0 +1,48 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// Get a value from a bucket
|
||||
func (n *Node) Get(bucketName string, key interface{}, to interface{}) error {
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if !ref.IsValid() || ref.Kind() != reflect.Ptr {
|
||||
return ErrPtrNeeded
|
||||
}
|
||||
|
||||
id, err := toBytes(key, n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.get(n.tx, bucketName, id, to)
|
||||
}
|
||||
|
||||
return n.s.Bolt.View(func(tx *bolt.Tx) error {
|
||||
return n.get(tx, bucketName, id, to)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) get(tx *bolt.Tx, bucketName string, id []byte, to interface{}) error {
|
||||
bucket := n.GetBucket(tx, bucketName)
|
||||
if bucket == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
raw := bucket.Get(id)
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return n.s.Codec.Decode(raw, to)
|
||||
}
|
||||
|
||||
// Get a value from a bucket
|
||||
func (s *DB) Get(bucketName string, key interface{}, to interface{}) error {
|
||||
return s.root.Get(bucketName, key, to)
|
||||
}
|
|
@@ -0,0 +1,53 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestGet(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
err := db.Set("trash", 10, 100)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var nb int
|
||||
err = db.Get("trash", 10, &nb)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 100, nb)
|
||||
|
||||
tm := time.Now()
|
||||
err = db.Set("logs", tm, "I'm hungry")
|
||||
assert.NoError(t, err)
|
||||
|
||||
var message string
|
||||
err = db.Get("logs", tm, &message)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "I'm hungry", message)
|
||||
|
||||
var hand int
|
||||
err = db.Get("wallet", "100 bucks", &hand)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
err = db.Set("wallet", "10 bucks", 10)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.Get("wallet", "100 bucks", &hand)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
err = db.Get("logs", tm, nil)
|
||||
assert.Equal(t, ErrPtrNeeded, err)
|
||||
|
||||
err = db.Get("", nil, nil)
|
||||
assert.Equal(t, ErrPtrNeeded, err)
|
||||
|
||||
err = db.Get("", "100 bucks", &hand)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
}
|
|
@@ -0,0 +1,18 @@
|
|||
package storm
|
||||
|
||||
import "github.com/asdine/storm/codec"
|
||||
|
||||
// toBytes turns an interface into a slice of bytes
|
||||
func toBytes(key interface{}, encoder codec.EncodeDecoder) ([]byte, error) {
|
||||
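// nil, []byte and string keys are passed through unchanged; any other type is serialized with the configured codec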
if key == nil {
|
||||
return nil, nil
|
||||
}
|
||||
if k, ok := key.([]byte); ok {
|
||||
return k, nil
|
||||
}
|
||||
if k, ok := key.(string); ok {
|
||||
return []byte(k), nil
|
||||
}
|
||||
|
||||
return encoder.Encode(key)
|
||||
}
|
|
@@ -0,0 +1,41 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/asdine/storm/codec/gob"
|
||||
"github.com/asdine/storm/codec/json"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestToBytes(t *testing.T) {
|
||||
b, err := toBytes([]byte("a slice of bytes"), gob.Codec)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("a slice of bytes"), b)
|
||||
|
||||
b, err = toBytes("a string", gob.Codec)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("a string"), b)
|
||||
|
||||
b, err = toBytes(5, gob.Codec)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, b)
|
||||
|
||||
b, err = toBytes([]byte("Hey"), gob.Codec)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("Hey"), b)
|
||||
}
|
||||
|
||||
func TestToBytesWithCodec(t *testing.T) {
|
||||
b, err := toBytes([]byte("a slice of bytes"), json.Codec)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("a slice of bytes"), b)
|
||||
|
||||
b, err = toBytes("a string", json.Codec)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, []byte("a string"), b)
|
||||
|
||||
b, err = toBytes(&SimpleUser{ID: 10, Name: "John", age: 100}, json.Codec)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, `{"ID":10,"Name":"John"}`, string(b))
|
||||
}
|
|
@@ -0,0 +1,14 @@
|
|||
package index
|
||||
|
||||
import "errors"
|
||||
|
||||
var (
|
||||
// ErrNotFound is returned when the specified record is not saved in the bucket.
|
||||
ErrNotFound = errors.New("not found")
|
||||
|
||||
// ErrAlreadyExists is returned when trying to set an existing value on a field that has a unique index.
|
||||
ErrAlreadyExists = errors.New("already exists")
|
||||
|
||||
// ErrNilParam is returned when a parameter that must not be nil is nil.
|
||||
ErrNilParam = errors.New("param must not be nil")
|
||||
)
|
|
@@ -0,0 +1,12 @@
|
|||
package index
|
||||
|
||||
// Index is the interface satisfied by the unique and list indexes stored inside each bucket.
|
||||
type Index interface {
|
||||
Add(value []byte, targetID []byte) error
|
||||
Remove(value []byte) error
|
||||
RemoveID(id []byte) error
|
||||
Get(value []byte) []byte
|
||||
All(value []byte, opts *Options) ([][]byte, error)
|
||||
AllRecords(opts *Options) ([][]byte, error)
|
||||
Range(min []byte, max []byte, opts *Options) ([][]byte, error)
|
||||
}
|
|
@@ -0,0 +1,183 @@
|
|||
package index
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// NewListIndex loads a ListIndex
|
||||
func NewListIndex(parent *bolt.Bucket, indexName []byte) (*ListIndex, error) {
|
||||
var err error
|
||||
b := parent.Bucket(indexName)
|
||||
if b == nil {
|
||||
if !parent.Writable() {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
b, err = parent.CreateBucket(indexName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
ids, err := NewUniqueIndex(b, []byte("storm__ids"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &ListIndex{
|
||||
IndexBucket: b,
|
||||
Parent: parent,
|
||||
IDs: ids,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ListIndex is an index that references values and the corresponding IDs.
|
||||
type ListIndex struct {
|
||||
Parent *bolt.Bucket
|
||||
IndexBucket *bolt.Bucket
|
||||
IDs *UniqueIndex
|
||||
}
|
||||
|
||||
// Add a value to the list index
|
||||
func (idx *ListIndex) Add(value []byte, targetID []byte) error {
|
||||
if len(value) == 0 {
|
||||
return ErrNilParam
|
||||
}
|
||||
if len(targetID) == 0 {
|
||||
return ErrNilParam
|
||||
}
|
||||
|
||||
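// a target ID maps to a single value in a list index: drop any previous association before registering the new one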
oldValue := idx.IDs.Get(targetID)
|
||||
if oldValue != nil {
|
||||
uni, err := NewUniqueIndex(idx.IndexBucket, oldValue)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = uni.Remove(targetID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = idx.IDs.Remove(targetID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
uni, err := NewUniqueIndex(idx.IndexBucket, value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = uni.Add(targetID, targetID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return idx.IDs.Add(targetID, value)
|
||||
}
|
||||
|
||||
// Remove a value from the list index
|
||||
func (idx *ListIndex) Remove(value []byte) error {
|
||||
err := idx.IDs.RemoveID(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return idx.IndexBucket.DeleteBucket(value)
|
||||
}
|
||||
|
||||
// RemoveID removes an ID from the list index
|
||||
func (idx *ListIndex) RemoveID(targetID []byte) error {
|
||||
c := idx.IndexBucket.Cursor()
|
||||
|
||||
for bucketName, val := c.First(); bucketName != nil; bucketName, val = c.Next() {
|
||||
if val != nil {
|
||||
continue
|
||||
}
|
||||
|
||||
uni, err := NewUniqueIndex(idx.IndexBucket, bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = uni.Remove(targetID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return idx.IDs.Remove(targetID)
|
||||
}
|
||||
|
||||
// Get the first ID corresponding to the given value
|
||||
func (idx *ListIndex) Get(value []byte) []byte {
|
||||
uni, err := NewUniqueIndex(idx.IndexBucket, value)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
return uni.first()
|
||||
}
|
||||
|
||||
// All returns all the IDs corresponding to the given value
|
||||
func (idx *ListIndex) All(value []byte, opts *Options) ([][]byte, error) {
|
||||
uni, err := NewUniqueIndex(idx.IndexBucket, value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return uni.AllRecords(opts)
|
||||
}
|
||||
|
||||
// AllRecords returns all the IDs of this index
|
||||
func (idx *ListIndex) AllRecords(opts *Options) ([][]byte, error) {
|
||||
var list [][]byte
|
||||
|
||||
c := idx.IndexBucket.Cursor()
|
||||
|
||||
for bucketName, val := c.First(); bucketName != nil; bucketName, val = c.Next() {
|
||||
if val != nil || bytes.Equal(bucketName, []byte("storm__ids")) {
|
||||
continue
|
||||
}
|
||||
|
||||
uni, err := NewUniqueIndex(idx.IndexBucket, bucketName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
all, err := uni.AllRecords(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
list = append(list, all...)
|
||||
}
|
||||
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// Range returns the ids corresponding to the given range of values
|
||||
func (idx *ListIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
|
||||
var list [][]byte
|
||||
|
||||
c := idx.IndexBucket.Cursor()
|
||||
|
||||
for bucketName, val := c.Seek(min); bucketName != nil && bytes.Compare(bucketName, max) <= 0; bucketName, val = c.Next() {
|
||||
if val != nil || bytes.Equal(bucketName, []byte("storm__ids")) {
|
||||
continue
|
||||
}
|
||||
|
||||
uni, err := NewUniqueIndex(idx.IndexBucket, bucketName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
all, err := uni.AllRecords(opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
list = append(list, all...)
|
||||
}
|
||||
|
||||
return list, nil
|
||||
}
|
|
@@ -0,0 +1,127 @@
|
|||
package index_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/asdine/storm"
|
||||
"github.com/asdine/storm/index"
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestListIndex(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := storm.Open(filepath.Join(dir, "storm.db"))
|
||||
defer db.Close()
|
||||
|
||||
err := db.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("test"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
idx, err := index.NewListIndex(b, []byte("lindex1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("hello"), []byte("id1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("hello"), []byte("id1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("hello"), []byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("goodbye"), []byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add(nil, []byte("id2"))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, index.ErrNilParam, err)
|
||||
|
||||
err = idx.Add([]byte("hi"), nil)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, index.ErrNilParam, err)
|
||||
|
||||
ids, err := idx.All([]byte("hello"), nil)
|
||||
assert.Len(t, ids, 1)
|
||||
assert.Equal(t, []byte("id1"), ids[0])
|
||||
|
||||
ids, err = idx.All([]byte("goodbye"), nil)
|
||||
assert.Len(t, ids, 1)
|
||||
assert.Equal(t, []byte("id2"), ids[0])
|
||||
|
||||
ids, err = idx.All([]byte("yo"), nil)
|
||||
assert.Nil(t, ids)
|
||||
|
||||
err = idx.RemoveID([]byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
ids, err = idx.All([]byte("goodbye"), nil)
|
||||
assert.Len(t, ids, 0)
|
||||
|
||||
err = idx.RemoveID(nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.RemoveID([]byte("id1"))
|
||||
assert.NoError(t, err)
|
||||
err = idx.RemoveID([]byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
err = idx.RemoveID([]byte("id3"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
ids, err = idx.All([]byte("hello"), nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Nil(t, ids)
|
||||
|
||||
err = idx.Add([]byte("hello"), []byte("id1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("hi"), []byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("yo"), []byte("id3"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.RemoveID([]byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
ids, err = idx.All([]byte("hello"), nil)
|
||||
assert.Len(t, ids, 1)
|
||||
assert.Equal(t, []byte("id1"), ids[0])
|
||||
ids, err = idx.All([]byte("hi"), nil)
|
||||
assert.Len(t, ids, 0)
|
||||
ids, err = idx.All([]byte("yo"), nil)
|
||||
assert.Len(t, ids, 1)
|
||||
assert.Equal(t, []byte("id3"), ids[0])
|
||||
|
||||
err = idx.RemoveID([]byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
err = idx.RemoveID([]byte("id4"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("hey"), []byte("id1"))
|
||||
err = idx.Add([]byte("hey"), []byte("id2"))
|
||||
err = idx.Add([]byte("hey"), []byte("id3"))
|
||||
err = idx.Add([]byte("hey"), []byte("id4"))
|
||||
ids, err = idx.All([]byte("hey"), nil)
|
||||
assert.Len(t, ids, 4)
|
||||
|
||||
id := idx.Get([]byte("hey"))
|
||||
assert.Equal(t, []byte("id1"), id)
|
||||
|
||||
idx.Remove([]byte("hey"))
|
||||
ids, err = idx.All([]byte("hey"), nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, ids, 0)
|
||||
|
||||
ids, err = idx.All([]byte("hey"), nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, ids, 0)
|
||||
return nil
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
}
|
|
@@ -0,0 +1,15 @@
|
|||
package index
|
||||
|
||||
// NewOptions creates an Options value with default settings (no limit, no skip)
|
||||
func NewOptions() *Options {
|
||||
return &Options{
|
||||
Limit: -1,
|
||||
Skip: 0,
|
||||
}
|
||||
}
|
||||
|
||||
// Options are used to customize queries
|
||||
type Options struct {
|
||||
Limit int
|
||||
Skip int
|
||||
}
|
|
@@ -0,0 +1,145 @@
|
|||
package index
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// NewUniqueIndex loads a UniqueIndex
|
||||
func NewUniqueIndex(parent *bolt.Bucket, indexName []byte) (*UniqueIndex, error) {
|
||||
var err error
|
||||
b := parent.Bucket(indexName)
|
||||
if b == nil {
|
||||
if !parent.Writable() {
|
||||
return nil, ErrNotFound
|
||||
}
|
||||
b, err = parent.CreateBucket(indexName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &UniqueIndex{
|
||||
IndexBucket: b,
|
||||
Parent: parent,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UniqueIndex is an index that references unique values and the corresponding ID.
|
||||
type UniqueIndex struct {
|
||||
Parent *bolt.Bucket
|
||||
IndexBucket *bolt.Bucket
|
||||
}
|
||||
|
||||
// Add a value to the unique index
|
||||
func (idx *UniqueIndex) Add(value []byte, targetID []byte) error {
|
||||
if len(value) == 0 {
|
||||
return ErrNilParam
|
||||
}
|
||||
if len(targetID) == 0 {
|
||||
return ErrNilParam
|
||||
}
|
||||
|
||||
exists := idx.IndexBucket.Get(value)
|
||||
if exists != nil {
|
||||
if bytes.Equal(exists, targetID) {
|
||||
return nil
|
||||
}
|
||||
return ErrAlreadyExists
|
||||
}
|
||||
|
||||
return idx.IndexBucket.Put(value, targetID)
|
||||
}
|
||||
|
||||
// Remove a value from the unique index
|
||||
func (idx *UniqueIndex) Remove(value []byte) error {
|
||||
return idx.IndexBucket.Delete(value)
|
||||
}
|
||||
|
||||
// RemoveID removes an ID from the unique index
|
||||
func (idx *UniqueIndex) RemoveID(id []byte) error {
|
||||
c := idx.IndexBucket.Cursor()
|
||||
|
||||
for val, ident := c.First(); val != nil; val, ident = c.Next() {
|
||||
if bytes.Equal(ident, id) {
|
||||
return idx.Remove(val)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get the id corresponding to the given value
|
||||
func (idx *UniqueIndex) Get(value []byte) []byte {
|
||||
return idx.IndexBucket.Get(value)
|
||||
}
|
||||
|
||||
// All returns all the ids corresponding to the given value
|
||||
func (idx *UniqueIndex) All(value []byte, opts *Options) ([][]byte, error) {
|
||||
id := idx.IndexBucket.Get(value)
|
||||
if id != nil {
|
||||
return [][]byte{id}, nil
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// AllRecords returns all the IDs of this index
|
||||
func (idx *UniqueIndex) AllRecords(opts *Options) ([][]byte, error) {
|
||||
var list [][]byte
|
||||
|
||||
c := idx.IndexBucket.Cursor()
|
||||
|
||||
for val, ident := c.First(); val != nil; val, ident = c.Next() {
|
||||
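// Skip and Limit are decremented in place, so a given Options value is consumed by a single query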
if opts != nil && opts.Skip > 0 {
|
||||
opts.Skip--
|
||||
continue
|
||||
}
|
||||
|
||||
if opts != nil && opts.Limit == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if opts != nil && opts.Limit > 0 {
|
||||
opts.Limit--
|
||||
}
|
||||
|
||||
list = append(list, ident)
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// Range returns the ids corresponding to the given range of values
|
||||
func (idx *UniqueIndex) Range(min []byte, max []byte, opts *Options) ([][]byte, error) {
|
||||
var list [][]byte
|
||||
|
||||
c := idx.IndexBucket.Cursor()
|
||||
|
||||
for val, ident := c.Seek(min); val != nil && bytes.Compare(val, max) <= 0; val, ident = c.Next() {
|
||||
if opts != nil && opts.Skip > 0 {
|
||||
opts.Skip--
|
||||
continue
|
||||
}
|
||||
|
||||
if opts != nil && opts.Limit == 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if opts != nil && opts.Limit > 0 {
|
||||
opts.Limit--
|
||||
}
|
||||
|
||||
list = append(list, ident)
|
||||
}
|
||||
return list, nil
|
||||
}
|
||||
|
||||
// first returns the first ID of this index
|
||||
func (idx *UniqueIndex) first() []byte {
|
||||
c := idx.IndexBucket.Cursor()
|
||||
|
||||
for val, ident := c.First(); val != nil; val, ident = c.Next() {
|
||||
return ident
|
||||
}
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,152 @@
|
|||
package index_test
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/asdine/storm"
|
||||
"github.com/asdine/storm/codec/gob"
|
||||
"github.com/asdine/storm/index"
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestUniqueIndex(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := storm.Open(filepath.Join(dir, "storm.db"))
|
||||
defer db.Close()
|
||||
|
||||
err := db.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("test"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
idx, err := index.NewUniqueIndex(b, []byte("uindex1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("hello"), []byte("id1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("hello"), []byte("id1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("hello"), []byte("id2"))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, index.ErrAlreadyExists, err)
|
||||
|
||||
err = idx.Add(nil, []byte("id2"))
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, index.ErrNilParam, err)
|
||||
|
||||
err = idx.Add([]byte("hi"), nil)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, index.ErrNilParam, err)
|
||||
|
||||
id := idx.Get([]byte("hello"))
|
||||
assert.Equal(t, []byte("id1"), id)
|
||||
|
||||
id = idx.Get([]byte("goodbye"))
|
||||
assert.Nil(t, id)
|
||||
|
||||
err = idx.Remove([]byte("hello"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Remove(nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
id = idx.Get([]byte("hello"))
|
||||
assert.Nil(t, id)
|
||||
|
||||
err = idx.Add([]byte("hello"), []byte("id1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("hi"), []byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.Add([]byte("yo"), []byte("id3"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = idx.RemoveID([]byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
id = idx.Get([]byte("hello"))
|
||||
assert.Equal(t, []byte("id1"), id)
|
||||
id = idx.Get([]byte("hi"))
|
||||
assert.Nil(t, id)
|
||||
id = idx.Get([]byte("yo"))
|
||||
assert.Equal(t, []byte("id3"), id)
|
||||
ids, err := idx.All([]byte("yo"), nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, ids, 1)
|
||||
assert.Equal(t, []byte("id3"), ids[0])
|
||||
|
||||
err = idx.RemoveID([]byte("id2"))
|
||||
assert.NoError(t, err)
|
||||
err = idx.RemoveID([]byte("id4"))
|
||||
assert.NoError(t, err)
|
||||
return nil
|
||||
})
|
||||
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestUniqueIndexRange(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := storm.Open(filepath.Join(dir, "storm.db"))
|
||||
defer db.Close()
|
||||
|
||||
db.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("test"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
idx, err := index.NewUniqueIndex(b, []byte("uindex1"))
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
val, _ := gob.Codec.Encode(i)
|
||||
err = idx.Add(val, val)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
min, _ := gob.Codec.Encode(3)
|
||||
max, _ := gob.Codec.Encode(5)
|
||||
list, err := idx.Range(min, max, nil)
|
||||
assert.Len(t, list, 3)
|
||||
assert.NoError(t, err)
|
||||
|
||||
min, _ = gob.Codec.Encode(11)
|
||||
max, _ = gob.Codec.Encode(20)
|
||||
list, err = idx.Range(min, max, nil)
|
||||
assert.Len(t, list, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
min, _ = gob.Codec.Encode(7)
|
||||
max, _ = gob.Codec.Encode(2)
|
||||
list, err = idx.Range(min, max, nil)
|
||||
assert.Len(t, list, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
min, _ = gob.Codec.Encode(-5)
|
||||
max, _ = gob.Codec.Encode(2)
|
||||
list, err = idx.Range(min, max, nil)
|
||||
assert.Len(t, list, 0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
min, _ = gob.Codec.Encode(3)
|
||||
max, _ = gob.Codec.Encode(7)
|
||||
opts := index.NewOptions()
|
||||
opts.Skip = 2
|
||||
list, err = idx.Range(min, max, opts)
|
||||
assert.Len(t, list, 3)
|
||||
assert.NoError(t, err)
|
||||
|
||||
opts.Limit = 2
|
||||
list, err = idx.Range(min, max, opts)
|
||||
assert.Len(t, list, 2)
|
||||
assert.NoError(t, err)
|
||||
return nil
|
||||
})
|
||||
}
|
|
@@ -0,0 +1,57 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"github.com/asdine/storm/index"
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/fatih/structs"
|
||||
)
|
||||
|
||||
// Init creates the indexes and buckets for a given structure
|
||||
func (n *Node) Init(data interface{}) error {
|
||||
if !structs.IsStruct(data) {
|
||||
return ErrBadType
|
||||
}
|
||||
|
||||
info, err := extract(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.init(n.tx, info)
|
||||
}
|
||||
|
||||
err = n.s.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
return n.init(tx, info)
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (n *Node) init(tx *bolt.Tx, info *modelInfo) error {
|
||||
bucket, err := n.CreateBucketIfNotExists(tx, info.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for fieldName, idxInfo := range info.Indexes {
|
||||
switch idxInfo.Type {
|
||||
case tagUniqueIdx:
|
||||
_, err = index.NewUniqueIndex(bucket, []byte(indexPrefix+fieldName))
|
||||
case tagIdx:
|
||||
_, err = index.NewListIndex(bucket, []byte(indexPrefix+fieldName))
|
||||
default:
|
||||
err = ErrIdxNotFound
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Init creates the indexes and buckets for a given structure
|
||||
func (s *DB) Init(data interface{}) error {
|
||||
return s.root.Init(data)
|
||||
}
|
|
@@ -0,0 +1,45 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestInit(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
defer db.Close()
|
||||
|
||||
var u IndexedNameUser
|
||||
err := db.One("Name", "John", &u)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "bucket IndexedNameUser doesn't exist")
|
||||
|
||||
err = db.Init(&u)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.One("Name", "John", &u)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
err = db.Init(&ClassicBadTags{})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrUnknownTag, err)
|
||||
|
||||
err = db.Init(10)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrBadType, err)
|
||||
|
||||
err = db.Init(&ClassicNoTags{})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNoID, err)
|
||||
|
||||
err = db.Init(&struct{ ID string }{})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNoName, err)
|
||||
}
|
|
@@ -0,0 +1,27 @@
|
|||
package storm
|
||||
|
||||
import "github.com/boltdb/bolt"
|
||||
|
||||
// A Node in Storm represents the API to a BoltDB bucket.
|
||||
type Node struct {
|
||||
s *DB
|
||||
|
||||
// The root bucket. In the normal, simple case this will be empty.
|
||||
rootBucket []string
|
||||
|
||||
// Transaction object. Nil if not in transaction
|
||||
tx *bolt.Tx
|
||||
}
|
||||
|
||||
// From returns a new Storm node with a new bucket root below the current.
|
||||
// All DB operations on the new node will be executed relative to this bucket.
|
||||
func (n Node) From(addend ...string) *Node {
|
||||
n.rootBucket = append(n.rootBucket, addend...)
|
||||
return &n
|
||||
}
|
||||
|
||||
// WithTransaction returns a new Storm node that will use the given transaction.
|
||||
func (n Node) WithTransaction(tx *bolt.Tx) *Node {
|
||||
n.tx = tx
|
||||
return &n
|
||||
}
|
|
@@ -0,0 +1,48 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNode(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"), Root("a"))
|
||||
defer db.Close()
|
||||
|
||||
n1 := db.From("b", "c")
|
||||
assert.Equal(t, db, n1.s)
|
||||
assert.NotEqual(t, db.root, n1)
|
||||
assert.Equal(t, db.root.rootBucket, []string{"a"})
|
||||
assert.Equal(t, []string{"b", "c"}, n1.rootBucket)
|
||||
n2 := n1.From("d", "e")
|
||||
assert.Equal(t, []string{"b", "c", "d", "e"}, n2.rootBucket)
|
||||
}
|
||||
|
||||
func TestNodeWithTransaction(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"), Root("a"))
|
||||
defer db.Close()
|
||||
|
||||
var user User
|
||||
db.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
dbx := db.WithTransaction(tx)
|
||||
err := dbx.Save(&User{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
err = dbx.One("ID", 10, &user)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "John", user.Name)
|
||||
return nil
|
||||
})
|
||||
|
||||
err := db.One("ID", 10, &user)
|
||||
assert.NoError(t, err)
|
||||
|
||||
}
|
|
@@ -0,0 +1,80 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/fatih/structs"
|
||||
)
|
||||
|
||||
// One returns one record by the specified index
|
||||
func (n *Node) One(fieldName string, value interface{}, to interface{}) error {
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if !ref.IsValid() || (ref.Kind() != reflect.Ptr && structs.IsStruct(to)) {
|
||||
return ErrStructPtrNeeded
|
||||
}
|
||||
|
||||
if fieldName == "" {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
info, err := extract(to)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
val, err := toBytes(value, n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.one(n.tx, fieldName, info, to, val, fieldName == info.ID.Field.Name())
|
||||
}
|
||||
|
||||
return n.s.Bolt.View(func(tx *bolt.Tx) error {
|
||||
return n.one(tx, fieldName, info, to, val, fieldName == info.ID.Field.Name())
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) one(tx *bolt.Tx, fieldName string, info *modelInfo, to interface{}, val []byte, skipIndex bool) error {
|
||||
bucket := n.GetBucket(tx, info.Name)
|
||||
if bucket == nil {
|
||||
return fmt.Errorf("bucket %s doesn't exist", info.Name)
|
||||
}
|
||||
|
||||
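// when the query is on the ID field itself (skipIndex), the encoded value is already the bucket key and no index lookup is needed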
var id []byte
|
||||
if !skipIndex {
|
||||
idxInfo, ok := info.Indexes[fieldName]
|
||||
if !ok {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
idx, err := getIndex(bucket, idxInfo.Type, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id = idx.Get(val)
|
||||
} else {
|
||||
id = val
|
||||
}
|
||||
|
||||
if id == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
raw := bucket.Get(id)
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return n.s.Codec.Decode(raw, to)
|
||||
}
|
||||
|
||||
// One returns one record by the specified index
|
||||
func (s *DB) One(fieldName string, value interface{}, to interface{}) error {
|
||||
return s.root.One(fieldName, value, to)
|
||||
}
|
|
@@ -0,0 +1,108 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestOne(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
defer db.Close()
|
||||
|
||||
u := UniqueNameUser{Name: "John", ID: 10}
|
||||
err := db.Save(&u)
|
||||
assert.NoError(t, err)
|
||||
|
||||
v := UniqueNameUser{}
|
||||
err = db.One("Name", "John", &v)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, u, v)
|
||||
|
||||
for i := 0; i < 10; i++ {
|
||||
w := IndexedNameUser{Name: "John", ID: i + 1}
|
||||
err := db.Save(&w)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
x := IndexedNameUser{}
|
||||
err = db.One("Name", "John", &x)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "John", x.Name)
|
||||
assert.Equal(t, 1, x.ID)
|
||||
assert.Zero(t, x.age)
|
||||
assert.True(t, x.DateOfBirth.IsZero())
|
||||
|
||||
err = db.One("Name", "Mike", &x)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
err = db.One("", nil, &x)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
err = db.One("", "Mike", nil)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrStructPtrNeeded, err)
|
||||
|
||||
err = db.One("", nil, nil)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrStructPtrNeeded, err)
|
||||
|
||||
y := UniqueNameUser{Name: "Jake", ID: 200}
|
||||
err = db.Save(&y)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var y2 UniqueNameUser
|
||||
err = db.One("ID", 200, &y2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, y, y2)
|
||||
|
||||
n := NestedID{}
|
||||
n.ID = "100"
|
||||
n.Name = "John"
|
||||
|
||||
err = db.Save(&n)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var n2 NestedID
|
||||
err = db.One("ID", "100", &n2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, n, n2)
|
||||
}
|
||||
|
||||
func TestOneNotWritable(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
err := db.Save(&User{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
db.Close()
|
||||
|
||||
db, _ = Open(filepath.Join(dir, "storm.db"), BoltOptions(0660, &bolt.Options{
|
||||
ReadOnly: true,
|
||||
}))
|
||||
defer db.Close()
|
||||
|
||||
err = db.Save(&User{ID: 20, Name: "John"})
|
||||
assert.Error(t, err)
|
||||
|
||||
var u User
|
||||
err = db.One("ID", 10, &u)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 10, u.ID)
|
||||
assert.Equal(t, "John", u.Name)
|
||||
|
||||
err = db.One("Name", "John", &u)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 10, u.ID)
|
||||
assert.Equal(t, "John", u.Name)
|
||||
}
|
|
@@ -0,0 +1,56 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/asdine/storm/codec"
|
||||
"github.com/asdine/storm/index"
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// BoltOptions used to pass options to BoltDB.
|
||||
func BoltOptions(mode os.FileMode, options *bolt.Options) func(*DB) error {
|
||||
return func(d *DB) error {
|
||||
d.boltMode = mode
|
||||
d.boltOptions = options
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Codec used to set a custom encoder and decoder. The default is GOB.
|
||||
func Codec(c codec.EncodeDecoder) func(*DB) error {
|
||||
return func(d *DB) error {
|
||||
d.Codec = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// AutoIncrement used to enable bolt.NextSequence on empty integer ids.
|
||||
func AutoIncrement() func(*DB) error {
|
||||
return func(d *DB) error {
|
||||
d.autoIncrement = true
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Root used to set the root bucket. See also the From method.
|
||||
func Root(root ...string) func(*DB) error {
|
||||
return func(d *DB) error {
|
||||
d.rootBucket = root
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// Limit sets the maximum number of records to return
|
||||
func Limit(limit int) func(*index.Options) {
|
||||
return func(opts *index.Options) {
|
||||
opts.Limit = limit
|
||||
}
|
||||
}
|
||||
|
||||
// Skip sets the number of records to skip
|
||||
func Skip(offset int) func(*index.Options) {
|
||||
return func(opts *index.Options) {
|
||||
opts.Skip = offset
|
||||
}
|
||||
}
|
|
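A minimal usage sketch showing how these options compose with the query options above. The database path and the User type are illustrative assumptions, not part of this package:

package main

import (
	"log"

	"github.com/asdine/storm"
)

// User is an illustrative model: an integer ID plus an indexed Name.
type User struct {
	ID   int    `storm:"id"`
	Name string `storm:"index"`
}

func main() {
	// Open with a root bucket and auto-incremented integer IDs.
	db, err := storm.Open("my.db", storm.Root("app"), storm.AutoIncrement())
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// ID is zero, so AutoIncrement assigns the next bucket sequence.
	if err := db.Save(&User{Name: "John"}); err != nil {
		log.Fatal(err)
	}

	// Limit and Skip are plain functional options passed to queries.
	var users []User
	if err := db.Find("Name", "John", &users, storm.Limit(10), storm.Skip(20)); err != nil {
		log.Fatal(err)
	}
	log.Println("found", len(users), "users")
}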
@@ -0,0 +1,100 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/asdine/storm/index"
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/fatih/structs"
|
||||
)
|
||||
|
||||
// Range returns one or more records by the specified index within the specified range
|
||||
func (n *Node) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
|
||||
ref := reflect.ValueOf(to)
|
||||
|
||||
if ref.Kind() != reflect.Ptr || reflect.Indirect(ref).Kind() != reflect.Slice {
|
||||
return ErrSlicePtrNeeded
|
||||
}
|
||||
|
||||
typ := reflect.Indirect(ref).Type().Elem()
|
||||
newElem := reflect.New(typ)
|
||||
|
||||
d := structs.New(newElem.Interface())
|
||||
bucketName := d.Name()
|
||||
if bucketName == "" {
|
||||
return ErrNoName
|
||||
}
|
||||
|
||||
field, ok := d.FieldOk(fieldName)
|
||||
if !ok {
|
||||
return fmt.Errorf("field %s not found", fieldName)
|
||||
}
|
||||
|
||||
tag := field.Tag("storm")
|
||||
if tag == "" {
|
||||
return fmt.Errorf("index %s not found", fieldName)
|
||||
}
|
||||
|
||||
mn, err := toBytes(min, n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mx, err := toBytes(max, n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
opts := index.NewOptions()
|
||||
for _, fn := range options {
|
||||
fn(opts)
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.rnge(n.tx, bucketName, fieldName, tag, &ref, mn, mx, opts)
|
||||
}
|
||||
|
||||
return n.s.Bolt.View(func(tx *bolt.Tx) error {
|
||||
return n.rnge(tx, bucketName, fieldName, tag, &ref, mn, mx, opts)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) rnge(tx *bolt.Tx, bucketName, fieldName, tag string, ref *reflect.Value, min, max []byte, opts *index.Options) error {
|
||||
bucket := n.GetBucket(tx, bucketName)
|
||||
if bucket == nil {
|
||||
return fmt.Errorf("bucket %s not found", bucketName)
|
||||
}
|
||||
|
||||
idx, err := getIndex(bucket, tag, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
list, err := idx.Range(min, max, opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
results := reflect.MakeSlice(reflect.Indirect(*ref).Type(), len(list), len(list))
|
||||
|
||||
for i := range list {
|
||||
raw := bucket.Get(list[i])
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
err = n.s.Codec.Decode(raw, results.Index(i).Addr().Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
reflect.Indirect(*ref).Set(results)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Range returns one or more records by the specified index within the specified range
|
||||
func (s *DB) Range(fieldName string, min, max, to interface{}, options ...func(*index.Options)) error {
|
||||
return s.root.Range(fieldName, min, max, to, options...)
|
||||
}
|
|
@@ -0,0 +1,84 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRange(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
for i := 0; i < 100; i++ {
|
||||
w := User{
|
||||
Name: "John",
|
||||
ID: i + 1,
|
||||
Slug: fmt.Sprintf("John%03d", i+1),
|
||||
DateOfBirth: time.Now().Add(-time.Duration(i) * time.Hour),
|
||||
}
|
||||
err := db.Save(&w)
|
||||
assert.NoError(t, err)
|
||||
z := User{Name: fmt.Sprintf("Zach%03d", i+1), ID: i + 101, Slug: fmt.Sprintf("Zach%03d", i+1)}
|
||||
err = db.Save(&z)
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
min := "John010"
|
||||
max := "John020"
|
||||
var users []User
|
||||
err := db.Range("Slug", min, max, &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 11)
|
||||
assert.Equal(t, "John010", users[0].Slug)
|
||||
assert.Equal(t, "John020", users[10].Slug)
|
||||
|
||||
min = "Zach010"
|
||||
max = "Zach020"
|
||||
users = nil
|
||||
err = db.Range("Name", min, max, &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 11)
|
||||
assert.Equal(t, "Zach010", users[0].Name)
|
||||
assert.Equal(t, "Zach020", users[10].Name)
|
||||
|
||||
err = db.Range("Name", min, max, &User{})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrSlicePtrNeeded, err)
|
||||
|
||||
notTheRightUsers := []UniqueNameUser{}
|
||||
|
||||
err = db.Range("Name", min, max, ¬TheRightUsers)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "bucket UniqueNameUser not found")
|
||||
|
||||
users = nil
|
||||
|
||||
err = db.Range("Age", min, max, &users)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "field Age not found")
|
||||
|
||||
dateMin := time.Now().Add(-time.Duration(50) * time.Hour)
|
||||
dateMax := dateMin.Add(time.Duration(3) * time.Hour)
|
||||
err = db.Range("DateOfBirth", dateMin, dateMax, &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 3)
|
||||
assert.Equal(t, "John050", users[0].Slug)
|
||||
assert.Equal(t, "John048", users[2].Slug)
|
||||
|
||||
err = db.Range("Slug", "John010", "John040", &users, Limit(10), Skip(20))
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 10)
|
||||
assert.Equal(t, 30, users[0].ID)
|
||||
assert.Equal(t, 39, users[9].ID)
|
||||
|
||||
err = db.Range("Group", min, max, &users)
|
||||
assert.Error(t, err)
|
||||
assert.EqualError(t, err, "index Group not found")
|
||||
}
|
|
@@ -0,0 +1,64 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/fatih/structs"
|
||||
)
|
||||
|
||||
// Remove removes a structure from the associated bucket
|
||||
func (n *Node) Remove(data interface{}) error {
|
||||
if !structs.IsStruct(data) {
|
||||
return ErrBadType
|
||||
}
|
||||
|
||||
info, err := extract(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id, err := toBytes(info.ID.Value, n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.remove(n.tx, info, id)
|
||||
}
|
||||
|
||||
return n.s.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
return n.remove(tx, info, id)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) remove(tx *bolt.Tx, info *modelInfo, id []byte) error {
|
||||
bucket := n.GetBucket(tx, info.Name)
|
||||
if bucket == nil {
|
||||
return fmt.Errorf("bucket %s doesn't exist", info.Name)
|
||||
}
|
||||
|
||||
for fieldName, idxInfo := range info.Indexes {
|
||||
idx, err := getIndex(bucket, idxInfo.Type, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = idx.RemoveID(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
raw := bucket.Get(id)
|
||||
if raw == nil {
|
||||
return ErrNotFound
|
||||
}
|
||||
|
||||
return bucket.Delete(id)
|
||||
}
|
||||
|
||||
// Remove removes a structure from the associated bucket
|
||||
func (s *DB) Remove(data interface{}) error {
|
||||
return s.root.Remove(data)
|
||||
}
|
|
@@ -0,0 +1,54 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestRemove(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
u1 := IndexedNameUser{ID: 10, Name: "John", age: 10}
|
||||
err := db.Save(&u1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.Remove(&u1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.Remove(&u1)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
u2 := IndexedNameUser{}
|
||||
err = db.Get("IndexedNameUser", 10, &u2)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
err = db.Remove(nil)
|
||||
assert.Equal(t, ErrBadType, err)
|
||||
|
||||
var users []User
|
||||
for i := 0; i < 10; i++ {
|
||||
user := User{Name: "John", ID: i + 1, Slug: fmt.Sprintf("John%d", i+1), DateOfBirth: time.Now().Add(-time.Duration(i*10) * time.Minute)}
|
||||
err := db.Save(&user)
|
||||
assert.NoError(t, err)
|
||||
users = append(users, user)
|
||||
}
|
||||
|
||||
err = db.Remove(&users[0])
|
||||
assert.NoError(t, err)
|
||||
err = db.Remove(&users[1])
|
||||
assert.NoError(t, err)
|
||||
|
||||
users = nil
|
||||
err = db.All(&users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 8)
|
||||
assert.Equal(t, 3, users[0].ID)
|
||||
}
|
|
@@ -0,0 +1,113 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/fatih/structs"
|
||||
)
|
||||
|
||||
// Save a structure
|
||||
func (n *Node) Save(data interface{}) error {
|
||||
if !structs.IsStruct(data) {
|
||||
return ErrBadType
|
||||
}
|
||||
|
||||
info, err := extract(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var id []byte
|
||||
|
||||
if info.ID.IsZero {
|
||||
if !info.ID.IsOfIntegerFamily() || !n.s.autoIncrement {
|
||||
return ErrZeroID
|
||||
}
|
||||
} else {
|
||||
id, err = toBytes(info.ID.Value, n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
var raw []byte
|
||||
// postpone encoding if AutoIncrement mode is enabled
|
||||
if !n.s.autoIncrement {
|
||||
raw, err = n.s.Codec.Encode(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.save(n.tx, info, id, raw)
|
||||
}
|
||||
|
||||
return n.s.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
return n.save(tx, info, id, raw)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) save(tx *bolt.Tx, info *modelInfo, id []byte, raw []byte) error {
|
||||
bucket, err := n.CreateBucketIfNotExists(tx, info.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if info.ID.IsZero {
|
||||
// the ID is zero and of an integer type: generate the next bucket sequence
|
||||
intID, _ := bucket.NextSequence()
|
||||
|
||||
// convert to the right integer size
|
||||
err = info.ID.Field.Set(reflect.ValueOf(intID).Convert(info.ID.Type()).Interface())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
id, err = toBytes(info.ID.Field.Value(), n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if n.s.autoIncrement {
|
||||
raw, err = n.s.Codec.Encode(info.data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
for fieldName, idxInfo := range info.Indexes {
|
||||
idx, err := getIndex(bucket, idxInfo.Type, fieldName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = idx.RemoveID(id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if idxInfo.Field.IsZero() {
|
||||
continue
|
||||
}
|
||||
|
||||
value, err := toBytes(idxInfo.Field.Value(), n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = idx.Add(value, id)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return bucket.Put(id, raw)
|
||||
}
|
||||
|
||||
// Save a structure
|
||||
func (s *DB) Save(data interface{}) error {
|
||||
return s.root.Save(data)
|
||||
}
|
|
@@ -0,0 +1,269 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/asdine/storm/codec/gob"
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSave(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
err := db.Save(&SimpleUser{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.Save(&SimpleUser{Name: "John"})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrZeroID, err)
|
||||
|
||||
err = db.Save(&ClassicBadTags{ID: "id", PublicField: 100})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrUnknownTag, err)
|
||||
|
||||
err = db.Save(&UserWithNoID{Name: "John"})
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNoID, err)
|
||||
|
||||
err = db.Save(&UserWithIDField{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
u := UserWithEmbeddedIDField{}
|
||||
u.ID = 150
|
||||
u.Name = "Pete"
|
||||
u.Age = 10
|
||||
err = db.Save(&u)
|
||||
assert.NoError(t, err)
|
||||
|
||||
v := UserWithIDField{ID: 10, Name: "John"}
|
||||
err = db.Save(&v)
|
||||
assert.NoError(t, err)
|
||||
|
||||
w := UserWithEmbeddedField{}
|
||||
w.ID = 150
|
||||
w.Name = "John"
|
||||
err = db.Save(&w)
|
||||
assert.NoError(t, err)
|
||||
|
||||
db.Bolt.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte("UserWithIDField"))
|
||||
assert.NotNil(t, bucket)
|
||||
|
||||
i, err := toBytes(10, gob.Codec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
val := bucket.Get(i)
|
||||
assert.NotNil(t, val)
|
||||
|
||||
content, err := db.Codec.Encode(&v)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, content, val)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func TestSaveUnique(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
u1 := UniqueNameUser{ID: 10, Name: "John", age: 10}
|
||||
err := db.Save(&u1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
u2 := UniqueNameUser{ID: 11, Name: "John", age: 100}
|
||||
err = db.Save(&u2)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrAlreadyExists, err)
|
||||
|
||||
// same id
|
||||
u3 := UniqueNameUser{ID: 10, Name: "Jake", age: 100}
|
||||
err = db.Save(&u3)
|
||||
assert.NoError(t, err)
|
||||
|
||||
db.Bolt.View(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket([]byte("UniqueNameUser"))
|
||||
|
||||
uniqueBucket := bucket.Bucket([]byte(indexPrefix + "Name"))
|
||||
assert.NotNil(t, uniqueBucket)
|
||||
|
||||
id := uniqueBucket.Get([]byte("Jake"))
|
||||
i, err := toBytes(10, gob.Codec)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, i, id)
|
||||
|
||||
id = uniqueBucket.Get([]byte("John"))
|
||||
assert.Nil(t, id)
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
func TestSaveIndex(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
u1 := IndexedNameUser{ID: 10, Name: "John", age: 10}
|
||||
err := db.Save(&u1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
u1 = IndexedNameUser{ID: 10, Name: "John", age: 10}
|
||||
err = db.Save(&u1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
u2 := IndexedNameUser{ID: 11, Name: "John", age: 100}
|
||||
err = db.Save(&u2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
name1 := "Jake"
|
||||
name2 := "Jane"
|
||||
name3 := "James"
|
||||
|
||||
for i := 0; i < 1000; i++ {
|
||||
u := IndexedNameUser{ID: i + 1}
|
||||
|
||||
if i%2 == 0 {
|
||||
u.Name = name1
|
||||
} else {
|
||||
u.Name = name2
|
||||
}
|
||||
|
||||
db.Save(&u)
|
||||
}
|
||||
|
||||
var users []IndexedNameUser
|
||||
err = db.Find("Name", name1, &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 500)
|
||||
|
||||
err = db.Find("Name", name2, &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 500)
|
||||
|
||||
err = db.Find("Name", name3, &users)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrNotFound, err)
|
||||
|
||||
err = db.Save(nil)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrBadType, err)
|
||||
}
|
||||
|
||||
func TestSaveEmptyValues(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
defer db.Close()
|
||||
|
||||
u := User{
|
||||
ID: 10,
|
||||
}
|
||||
err := db.Save(&u)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var v User
|
||||
err = db.One("ID", 10, &v)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 10, v.ID)
|
||||
|
||||
u.Name = "John"
|
||||
u.Slug = "john"
|
||||
err = db.Save(&u)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.One("Name", "John", &v)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "John", v.Name)
|
||||
assert.Equal(t, "john", v.Slug)
|
||||
err = db.One("Slug", "john", &v)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "John", v.Name)
|
||||
assert.Equal(t, "john", v.Slug)
|
||||
|
||||
u.Name = ""
|
||||
u.Slug = ""
|
||||
err = db.Save(&u)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.One("Name", "John", &v)
|
||||
assert.Error(t, err)
|
||||
err = db.One("Slug", "john", &v)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestSaveAutoIncrement(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"), AutoIncrement())
|
||||
defer db.Close()
|
||||
|
||||
for i := 1; i < 10; i++ {
|
||||
s := SimpleUser{Name: "John"}
|
||||
err := db.Save(&s)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, i, s.ID)
|
||||
}
|
||||
|
||||
u := UserWithUint64IDField{Name: "John"}
|
||||
err := db.Save(&u)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(1), u.ID)
|
||||
v := UserWithUint64IDField{}
|
||||
err = db.One("ID", uint64(1), &v)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, u, v)
|
||||
|
||||
ui := UserWithIDField{Name: "John"}
|
||||
err = db.Save(&ui)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, ui.ID)
|
||||
vi := UserWithIDField{}
|
||||
err = db.One("ID", 1, &vi)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, ui, vi)
|
||||
|
||||
us := UserWithStringIDField{Name: "John"}
|
||||
err = db.Save(&us)
|
||||
assert.Error(t, err)
|
||||
assert.Equal(t, ErrZeroID, err)
|
||||
}
|
||||
|
||||
func TestSaveDifferentBucketRoot(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"), AutoIncrement())
|
||||
defer db.Close()
|
||||
|
||||
assert.Len(t, db.rootBucket, 0)
|
||||
|
||||
dbSub := db.From("sub")
|
||||
|
||||
assert.NotEqual(t, dbSub, db)
|
||||
assert.Len(t, dbSub.rootBucket, 1)
|
||||
|
||||
err := db.Save(&User{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
err = dbSub.Save(&User{ID: 11, Name: "Paul"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var (
|
||||
john User
|
||||
paul User
|
||||
)
|
||||
|
||||
err = db.One("Name", "John", &john)
|
||||
assert.NoError(t, err)
|
||||
err = db.One("Name", "Paul", &paul)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = dbSub.One("Name", "Paul", &paul)
|
||||
assert.NoError(t, err)
|
||||
err = dbSub.One("Name", "John", &john)
|
||||
assert.Error(t, err)
|
||||
}
|
|
@@ -0,0 +1,46 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// Set a key/value pair into a bucket
|
||||
func (n *Node) Set(bucketName string, key interface{}, value interface{}) error {
|
||||
if key == nil {
|
||||
return ErrNilParam
|
||||
}
|
||||
|
||||
id, err := toBytes(key, n.s.Codec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var data []byte
|
||||
if value != nil {
|
||||
data, err = n.s.Codec.Encode(value)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if n.tx != nil {
|
||||
return n.set(n.tx, bucketName, id, data)
|
||||
}
|
||||
|
||||
return n.s.Bolt.Update(func(tx *bolt.Tx) error {
|
||||
return n.set(tx, bucketName, id, data)
|
||||
})
|
||||
}
|
||||
|
||||
func (n *Node) set(tx *bolt.Tx, bucketName string, id, data []byte) error {
|
||||
bucket, err := n.CreateBucketIfNotExists(tx, bucketName)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return bucket.Put(id, data)
|
||||
}
|
||||
|
||||
// Set a key/value pair into a bucket
|
||||
func (s *DB) Set(bucketName string, key interface{}, value interface{}) error {
|
||||
return s.root.Set(bucketName, key, value)
|
||||
}
|
|
@@ -0,0 +1,67 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net/mail"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/asdine/storm/codec/gob"
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestSet(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
err := db.Set("b1", 10, 10)
|
||||
assert.NoError(t, err)
|
||||
err = db.Set("b1", "best friend's mail", &mail.Address{Name: "Gandalf", Address: "gandalf@lorien.ma"})
|
||||
assert.NoError(t, err)
|
||||
err = db.Set("b2", []byte("i'm already a slice of bytes"), "a value")
|
||||
assert.NoError(t, err)
|
||||
err = db.Set("b2", []byte("i'm already a slice of bytes"), nil)
|
||||
assert.NoError(t, err)
|
||||
err = db.Set("b1", 0, 100)
|
||||
assert.NoError(t, err)
|
||||
err = db.Set("b1", nil, 100)
|
||||
assert.Error(t, err)
|
||||
|
||||
db.Bolt.View(func(tx *bolt.Tx) error {
|
||||
b1 := tx.Bucket([]byte("b1"))
|
||||
assert.NotNil(t, b1)
|
||||
b2 := tx.Bucket([]byte("b2"))
|
||||
assert.NotNil(t, b2)
|
||||
|
||||
k1, err := toBytes(10, gob.Codec)
|
||||
assert.NoError(t, err)
|
||||
val := b1.Get(k1)
|
||||
assert.NotNil(t, val)
|
||||
|
||||
k2 := []byte("best friend's mail")
|
||||
val = b1.Get(k2)
|
||||
assert.NotNil(t, val)
|
||||
|
||||
k3, err := toBytes(0, gob.Codec)
|
||||
assert.NoError(t, err)
|
||||
val = b1.Get(k3)
|
||||
assert.NotNil(t, val)
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
err = db.Set("", 0, 100)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = db.Set("b", nil, 100)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = db.Set("b", 10, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.Set("b", nil, nil)
|
||||
assert.Error(t, err)
|
||||
}
|
|
@@ -0,0 +1,106 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/asdine/storm/codec"
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
// Open opens a database at the given path with optional Storm options.
|
||||
func Open(path string, stormOptions ...func(*DB) error) (*DB, error) {
|
||||
var err error
|
||||
|
||||
s := &DB{
|
||||
Path: path,
|
||||
Codec: defaultCodec,
|
||||
}
|
||||
|
||||
for _, option := range stormOptions {
|
||||
option(s)
|
||||
}
|
||||
|
||||
if s.boltMode == 0 {
|
||||
s.boltMode = 0600
|
||||
}
|
||||
|
||||
if s.boltOptions == nil {
|
||||
s.boltOptions = &bolt.Options{Timeout: 1 * time.Second}
|
||||
}
|
||||
|
||||
s.Bolt, err = bolt.Open(path, s.boltMode, s.boltOptions)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s.root = &Node{s: s, rootBucket: s.rootBucket}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// OpenWithOptions opens a database with the given boltDB options and optional Storm options.
|
||||
// Deprecated: Use storm.Open with storm.BoltOptions instead.
|
||||
func OpenWithOptions(path string, mode os.FileMode, boltOptions *bolt.Options, stormOptions ...func(*DB)) (*DB, error) {
|
||||
db, err := bolt.Open(path, mode, boltOptions)
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
s := &DB{
|
||||
Path: path,
|
||||
Bolt: db,
|
||||
Codec: defaultCodec,
|
||||
}
|
||||
|
||||
for _, option := range stormOptions {
|
||||
option(s)
|
||||
}
|
||||
|
||||
s.root = &Node{s: s, rootBucket: s.rootBucket}
|
||||
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// DB is the wrapper around BoltDB. It contains an instance of BoltDB and uses it to perform all the
|
||||
// needed operations
|
||||
type DB struct {
|
||||
// Path of the database file
|
||||
Path string
|
||||
|
||||
// Handles encoding and decoding of objects
|
||||
Codec codec.EncodeDecoder
|
||||
|
||||
// Bolt is still easily accessible
|
||||
Bolt *bolt.DB
|
||||
|
||||
// Bolt file mode
|
||||
boltMode os.FileMode
|
||||
|
||||
// Bolt options
|
||||
boltOptions *bolt.Options
|
||||
|
||||
// Enable auto increment on empty integer fields
|
||||
autoIncrement bool
|
||||
|
||||
// The root node that points to the root bucket.
|
||||
root *Node
|
||||
|
||||
// The root bucket name
|
||||
rootBucket []string
|
||||
}
|
||||
|
||||
// From returns a new Storm node with a new bucket root.
|
||||
// All DB operations on the new node will be executed relative to the given
|
||||
// bucket.
|
||||
func (s *DB) From(root ...string) *Node {
|
||||
newNode := *s.root
|
||||
newNode.rootBucket = root
|
||||
return &newNode
|
||||
}
|
||||
|
||||
// WithTransaction returns a New Storm node that will use the given transaction.
|
||||
func (s *DB) WithTransaction(tx *bolt.Tx) *Node {
|
||||
return s.root.WithTransaction(tx)
|
||||
}
|
|
@@ -0,0 +1,72 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestNewStorm(t *testing.T) {
|
||||
db, err := Open("")
|
||||
|
||||
assert.Error(t, err)
|
||||
assert.Nil(t, db)
|
||||
|
||||
dir, err := ioutil.TempDir(os.TempDir(), "storm")
|
||||
assert.NoError(t, err)
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
file := filepath.Join(dir, "storm.db")
|
||||
db, err = Open(file)
|
||||
defer db.Close()
|
||||
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, file, db.Path)
|
||||
assert.NotNil(t, db.Bolt)
|
||||
assert.Equal(t, defaultCodec, db.Codec)
|
||||
}
|
||||
|
||||
func TestNewStormWithOptions(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := OpenWithOptions(filepath.Join(dir, "storm.db"), 0600, nil)
|
||||
defer db.Close()
|
||||
|
||||
err := db.Save(&SimpleUser{ID: 10})
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestNewStormWithStormOptions(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
|
||||
dc := new(dummyCodec)
|
||||
db1, _ := Open(filepath.Join(dir, "storm1.db"), BoltOptions(0660, &bolt.Options{Timeout: 10 * time.Second}), Codec(dc), AutoIncrement(), Root("a", "b"))
|
||||
assert.Equal(t, dc, db1.Codec)
|
||||
assert.True(t, db1.autoIncrement)
|
||||
assert.Equal(t, os.FileMode(0660), db1.boltMode)
|
||||
assert.Equal(t, 10*time.Second, db1.boltOptions.Timeout)
|
||||
assert.Equal(t, []string{"a", "b"}, db1.rootBucket)
|
||||
assert.Equal(t, []string{"a", "b"}, db1.root.rootBucket)
|
||||
|
||||
err := db1.Save(&SimpleUser{ID: 1})
|
||||
assert.NoError(t, err)
|
||||
|
||||
db2, _ := Open(filepath.Join(dir, "storm2.db"), Codec(dc))
|
||||
assert.Equal(t, dc, db2.Codec)
|
||||
}
|
||||
|
||||
type dummyCodec int
|
||||
|
||||
func (c dummyCodec) Encode(v interface{}) ([]byte, error) {
|
||||
return []byte("dummy"), nil
|
||||
}
|
||||
|
||||
func (c dummyCodec) Decode(b []byte, v interface{}) error {
|
||||
return nil
|
||||
}
|
|
@@ -0,0 +1,129 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io"
|
||||
"time"
|
||||
)
|
||||
|
||||
type ClassicNoTags struct {
|
||||
PublicField int
|
||||
privateField string
|
||||
Date time.Time
|
||||
InlineStruct struct {
|
||||
a float32
|
||||
B float64
|
||||
}
|
||||
Interf io.Writer
|
||||
}
|
||||
|
||||
type ClassicBadTags struct {
|
||||
ID string
|
||||
PublicField int `storm:"mrots"`
|
||||
privateField string
|
||||
Date time.Time
|
||||
InlineStruct struct {
|
||||
a float32
|
||||
B float64
|
||||
}
|
||||
Interf io.Writer
|
||||
}
|
||||
|
||||
type ClassicUnique struct {
|
||||
ID string
|
||||
PublicField int `storm:"unique"`
|
||||
privateField string `storm:"unique"`
|
||||
privateField2 string `storm:"unique"`
|
||||
Date time.Time `storm:"unique"`
|
||||
InlineStruct struct {
|
||||
a float32
|
||||
B float64
|
||||
} `storm:"unique"`
|
||||
Interf io.Writer `storm:"unique"`
|
||||
}
|
||||
|
||||
type ClassicIndex struct {
|
||||
ID string
|
||||
PublicField int `storm:"index"`
|
||||
privateField string `storm:"index"`
|
||||
Date time.Time `storm:"index"`
|
||||
InlineStruct struct {
|
||||
a float32
|
||||
B float64
|
||||
} `storm:"index"`
|
||||
InlineStructPtr *UserWithNoID `storm:"index"`
|
||||
Interf io.Writer `storm:"index"`
|
||||
}
|
||||
|
||||
type ClassicInline struct {
|
||||
PublicField int `storm:"unique"`
|
||||
ClassicIndex `storm:"inline"`
|
||||
*ToEmbed `storm:"inline"`
|
||||
Date time.Time `storm:"unique"`
|
||||
}
|
||||
|
||||
type User struct {
|
||||
ID int `storm:"id"`
|
||||
Name string `storm:"index"`
|
||||
age int
|
||||
DateOfBirth time.Time `storm:"index"`
|
||||
Group string
|
||||
Slug string `storm:"unique"`
|
||||
}
|
||||
|
||||
type ToEmbed struct {
|
||||
ID string
|
||||
}
|
||||
|
||||
type NestedID struct {
|
||||
ToEmbed `storm:"inline"`
|
||||
Name string
|
||||
}
|
||||
|
||||
type SimpleUser struct {
|
||||
ID int `storm:"id"`
|
||||
Name string
|
||||
age int
|
||||
}
|
||||
|
||||
type UserWithNoID struct {
|
||||
Name string
|
||||
}
|
||||
|
||||
type UserWithIDField struct {
|
||||
ID int
|
||||
Name string
|
||||
}
|
||||
|
||||
type UserWithUint64IDField struct {
|
||||
ID uint64
|
||||
Name string
|
||||
}
|
||||
|
||||
type UserWithStringIDField struct {
|
||||
ID string
|
||||
Name string
|
||||
}
|
||||
|
||||
type UserWithEmbeddedIDField struct {
|
||||
UserWithIDField `storm:"inline"`
|
||||
Age int
|
||||
}
|
||||
|
||||
type UserWithEmbeddedField struct {
|
||||
UserWithNoID `storm:"inline"`
|
||||
ID uint64
|
||||
}
|
||||
|
||||
type IndexedNameUser struct {
|
||||
ID int `storm:"id"`
|
||||
Name string `storm:"index"`
|
||||
age int
|
||||
DateOfBirth time.Time `storm:"index"`
|
||||
Group string
|
||||
}
|
||||
|
||||
type UniqueNameUser struct {
|
||||
ID int `storm:"id"`
|
||||
Name string `storm:"unique"`
|
||||
age int
|
||||
}
|
|
@@ -0,0 +1,52 @@
|
|||
package storm
|
||||
|
||||
// Begin starts a new transaction.
|
||||
func (n Node) Begin(writable bool) (*Node, error) {
|
||||
var err error
|
||||
|
||||
n.tx, err = n.s.Bolt.Begin(writable)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &n, nil
|
||||
}
|
||||
|
||||
// Rollback closes the transaction and ignores all previous updates.
|
||||
func (n *Node) Rollback() error {
|
||||
if n.tx == nil {
|
||||
return ErrNotInTransaction
|
||||
}
|
||||
|
||||
err := n.tx.Rollback()
|
||||
n.tx = nil
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit writes all changes to disk.
|
||||
func (n *Node) Commit() error {
|
||||
if n.tx == nil {
|
||||
return ErrNotInTransaction
|
||||
}
|
||||
|
||||
err := n.tx.Commit()
|
||||
n.tx = nil
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// Begin starts a new transaction.
|
||||
func (s *DB) Begin(writable bool) (*Node, error) {
|
||||
return s.root.Begin(writable)
|
||||
}
|
||||
|
||||
// Rollback closes the transaction and ignores all previous updates.
|
||||
func (s *DB) Rollback() error {
|
||||
return s.root.Rollback()
|
||||
}
|
||||
|
||||
// Commit writes all changes to disk.
|
||||
func (s *DB) Commit() error {
|
||||
return s.root.Commit()
|
||||
}
|
|
@@ -0,0 +1,130 @@
|
|||
package storm
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
func TestTransaction(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
err := db.Rollback()
|
||||
assert.Error(t, err)
|
||||
|
||||
err = db.Commit()
|
||||
assert.Error(t, err)
|
||||
|
||||
tx, err := db.Begin(true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.NotNil(t, tx.tx)
|
||||
|
||||
err = tx.Init(&SimpleUser{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tx.Save(&User{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tx.Save(&User{ID: 20, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tx.Save(&User{ID: 30, Name: "Steve"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var user User
|
||||
err = tx.One("ID", 10, &user)
|
||||
assert.NoError(t, err)
|
||||
|
||||
var users []User
|
||||
err = tx.AllByIndex("Name", &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 3)
|
||||
|
||||
err = tx.All(&users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 3)
|
||||
|
||||
err = tx.Find("Name", "Steve", &users)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, users, 1)
|
||||
|
||||
err = tx.Remove(&user)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tx.One("ID", 10, &user)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = tx.Set("b1", "best friend's mail", "mail@provider.com")
|
||||
assert.NoError(t, err)
|
||||
|
||||
var str string
|
||||
err = tx.Get("b1", "best friend's mail", &str)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "mail@provider.com", str)
|
||||
|
||||
err = tx.Delete("b1", "best friend's mail")
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tx.Get("b1", "best friend's mail", &str)
|
||||
assert.Error(t, err)
|
||||
|
||||
err = tx.Commit()
|
||||
assert.NoError(t, err)
|
||||
|
||||
assert.Nil(t, tx.tx)
|
||||
|
||||
err = db.One("ID", 30, &user)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 30, user.ID)
|
||||
}
|
||||
|
||||
func TestTransactionRollback(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
tx, err := db.Begin(true)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tx.Save(&User{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
var user User
|
||||
err = tx.One("ID", 10, &user)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 10, user.ID)
|
||||
|
||||
err = tx.Rollback()
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = db.One("ID", 10, &user)
|
||||
assert.Error(t, err)
|
||||
}
|
||||
|
||||
func TestTransactionNotWritable(t *testing.T) {
|
||||
dir, _ := ioutil.TempDir(os.TempDir(), "storm")
|
||||
defer os.RemoveAll(dir)
|
||||
db, _ := Open(filepath.Join(dir, "storm.db"))
|
||||
|
||||
err := db.Save(&User{ID: 10, Name: "John"})
|
||||
assert.NoError(t, err)
|
||||
|
||||
tx, err := db.Begin(false)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tx.Save(&User{ID: 20, Name: "John"})
|
||||
assert.Error(t, err)
|
||||
|
||||
var user User
|
||||
err = tx.One("ID", 10, &user)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = tx.Rollback()
|
||||
assert.NoError(t, err)
|
||||
}
|
|
@@ -0,0 +1,20 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2013 Ben Johnson
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy of
|
||||
this software and associated documentation files (the "Software"), to deal in
|
||||
the Software without restriction, including without limitation the rights to
|
||||
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
|
||||
the Software, and to permit persons to whom the Software is furnished to do so,
|
||||
subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
|
||||
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
|
||||
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
|
||||
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
|
||||
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
|
@@ -0,0 +1,18 @@
|
|||
BRANCH=`git rev-parse --abbrev-ref HEAD`
|
||||
COMMIT=`git rev-parse --short HEAD`
|
||||
GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)"
|
||||
|
||||
default: build
|
||||
|
||||
race:
|
||||
@go test -v -race -test.run="TestSimulate_(100op|1000op)"
|
||||
|
||||
# go get github.com/kisielk/errcheck
|
||||
errcheck:
|
||||
@errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt
|
||||
|
||||
test:
|
||||
@go test -v -cover .
|
||||
@go test -v ./cmd/bolt
|
||||
|
||||
.PHONY: fmt test
|
|
@@ -0,0 +1,850 @@
|
|||
Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.0-green.svg)
|
||||
====
|
||||
|
||||
Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas]
|
||||
[LMDB project][lmdb]. The goal of the project is to provide a simple,
|
||||
fast, and reliable database for projects that don't require a full database
|
||||
server such as Postgres or MySQL.
|
||||
|
||||
Since Bolt is meant to be used as such a low-level piece of functionality,
|
||||
simplicity is key. The API will be small and only focus on getting values
|
||||
and setting values. That's it.
|
||||
|
||||
[hyc_symas]: https://twitter.com/hyc_symas
|
||||
[lmdb]: http://symas.com/mdb/
|
||||
|
||||
## Project Status
|
||||
|
||||
Bolt is stable and the API is fixed. Full unit test coverage and randomized
|
||||
black box testing are used to ensure database consistency and thread safety.
|
||||
Bolt is currently in high-load production environments serving databases as
|
||||
large as 1TB. Many companies such as Shopify and Heroku use Bolt-backed
|
||||
services every day.
|
||||
|
||||
## Table of Contents
|
||||
|
||||
- [Getting Started](#getting-started)
|
||||
- [Installing](#installing)
|
||||
- [Opening a database](#opening-a-database)
|
||||
- [Transactions](#transactions)
|
||||
- [Read-write transactions](#read-write-transactions)
|
||||
- [Read-only transactions](#read-only-transactions)
|
||||
- [Batch read-write transactions](#batch-read-write-transactions)
|
||||
- [Managing transactions manually](#managing-transactions-manually)
|
||||
- [Using buckets](#using-buckets)
|
||||
- [Using key/value pairs](#using-keyvalue-pairs)
|
||||
- [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket)
|
||||
- [Iterating over keys](#iterating-over-keys)
|
||||
- [Prefix scans](#prefix-scans)
|
||||
- [Range scans](#range-scans)
|
||||
- [ForEach()](#foreach)
|
||||
- [Nested buckets](#nested-buckets)
|
||||
- [Database backups](#database-backups)
|
||||
- [Statistics](#statistics)
|
||||
- [Read-Only Mode](#read-only-mode)
|
||||
- [Mobile Use (iOS/Android)](#mobile-use-iosandroid)
|
||||
- [Resources](#resources)
|
||||
- [Comparison with other databases](#comparison-with-other-databases)
|
||||
- [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases)
|
||||
- [LevelDB, RocksDB](#leveldb-rocksdb)
|
||||
- [LMDB](#lmdb)
|
||||
- [Caveats & Limitations](#caveats--limitations)
|
||||
- [Reading the Source](#reading-the-source)
|
||||
- [Other Projects Using Bolt](#other-projects-using-bolt)
|
||||
|
||||
## Getting Started
|
||||
|
||||
### Installing
|
||||
|
||||
To start using Bolt, install Go and run `go get`:
|
||||
|
||||
```sh
|
||||
$ go get github.com/boltdb/bolt/...
|
||||
```
|
||||
|
||||
This will retrieve the library and install the `bolt` command line utility into
|
||||
your `$GOBIN` path.
|
||||
|
||||
|
||||
### Opening a database
|
||||
|
||||
The top-level object in Bolt is a `DB`. It is represented as a single file on
|
||||
your disk and represents a consistent snapshot of your data.
|
||||
|
||||
To open your database, simply use the `bolt.Open()` function:
|
||||
|
||||
```go
|
||||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
|
||||
"github.com/boltdb/bolt"
|
||||
)
|
||||
|
||||
func main() {
|
||||
// Open the my.db data file in your current directory.
|
||||
// It will be created if it doesn't exist.
|
||||
db, err := bolt.Open("my.db", 0600, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer db.Close()
|
||||
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
Please note that Bolt obtains a file lock on the data file so multiple processes
|
||||
cannot open the same database at the same time. Opening an already open Bolt
|
||||
database will cause it to hang until the other process closes it. To prevent
|
||||
an indefinite wait you can pass a timeout option to the `Open()` function:
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second})
|
||||
```
|
||||
|
||||
|
||||
### Transactions
|
||||
|
||||
Bolt allows only one read-write transaction at a time but allows as many
|
||||
read-only transactions as you want at a time. Each transaction has a consistent
|
||||
view of the data as it existed when the transaction started.
|
||||
|
||||
Individual transactions and all objects created from them (e.g. buckets, keys)
|
||||
are not thread safe. To work with data in multiple goroutines you must start
|
||||
a transaction for each one or use locking to ensure only one goroutine accesses
|
||||
a transaction at a time. Creating a transaction from the `DB` is thread safe.
|
||||
|
||||
Read-only transactions and read-write transactions should not depend on one
|
||||
another and generally shouldn't be opened simultaneously in the same goroutine.
|
||||
This can cause a deadlock as the read-write transaction needs to periodically
|
||||
re-map the data file but it cannot do so while a read-only transaction is open.
|
||||
|
||||
|
||||
#### Read-write transactions
|
||||
|
||||
To start a read-write transaction, you can use the `DB.Update()` function:
|
||||
|
||||
```go
|
||||
err := db.Update(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Inside the closure, you have a consistent view of the database. You commit the
|
||||
transaction by returning `nil` at the end. You can also rollback the transaction
|
||||
at any point by returning an error. All database operations are allowed inside
|
||||
a read-write transaction.
|
||||
|
||||
Always check the return error as it will report any disk failures that can cause
|
||||
your transaction to not complete. If you return an error within your closure
|
||||
it will be passed through.
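As an illustrative sketch (the bucket and key names below are placeholders, not part of the API), returning an error from the closure aborts the whole transaction, while returning `nil` commits it:

```go
err := db.Update(func(tx *bolt.Tx) error {
	b, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
	if err != nil {
		return err // rolls the transaction back
	}
	if err := b.Put([]byte("answer"), []byte("42")); err != nil {
		return err // rolls the transaction back
	}
	return nil // commits the transaction
})
if err != nil {
	log.Fatal(err)
}
```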
|
||||
|
||||
|
||||
#### Read-only transactions
|
||||
|
||||
To start a read-only transaction, you can use the `DB.View()` function:
|
||||
|
||||
```go
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
You also get a consistent view of the database within this closure, however,
|
||||
no mutating operations are allowed within a read-only transaction. You can only
|
||||
retrieve buckets, retrieve values, and copy the database within a read-only
|
||||
transaction.
|
||||
|
||||
|
||||
#### Batch read-write transactions
|
||||
|
||||
Each `DB.Update()` waits for disk to commit the writes. This overhead
|
||||
can be minimized by combining multiple updates with the `DB.Batch()`
|
||||
function:
|
||||
|
||||
```go
|
||||
err := db.Batch(func(tx *bolt.Tx) error {
|
||||
...
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Concurrent Batch calls are opportunistically combined into larger
|
||||
transactions. Batch is only useful when there are multiple goroutines
|
||||
calling it.
|
||||
|
||||
The trade-off is that `Batch` can call the given
|
||||
function multiple times, if parts of the transaction fail. The
|
||||
function must be idempotent and side effects must take effect only
|
||||
after a successful return from `DB.Batch()`.
|
||||
|
||||
For example: don't display messages from inside the function, instead
|
||||
set variables in the enclosing scope:
|
||||
|
||||
```go
|
||||
var id uint64
|
||||
err := db.Batch(func(tx *bolt.Tx) error {
|
||||
// Find last key in bucket, decode as bigendian uint64, increment
|
||||
// by one, encode back to []byte, and add new key.
|
||||
...
|
||||
id = newValue
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return ...
|
||||
}
|
||||
fmt.Printf("Allocated ID %d\n", id)
|
||||
```
|
||||
|
||||
|
||||
#### Managing transactions manually
|
||||
|
||||
The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()`
|
||||
function. These helper functions will start the transaction, execute a function,
|
||||
and then safely close your transaction if an error is returned. This is the
|
||||
recommended way to use Bolt transactions.
|
||||
|
||||
However, sometimes you may want to manually start and end your transactions.
|
||||
You can use the `Tx.Begin()` function directly but **please** be sure to close
|
||||
the transaction.
|
||||
|
||||
```go
|
||||
// Start a writable transaction.
|
||||
tx, err := db.Begin(true)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer tx.Rollback()
|
||||
|
||||
// Use the transaction...
|
||||
_, err := tx.CreateBucket([]byte("MyBucket"))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Commit the transaction and check for error.
|
||||
if err := tx.Commit(); err != nil {
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
The first argument to `DB.Begin()` is a boolean stating if the transaction
|
||||
should be writable.
|
||||
|
||||
|
||||
### Using buckets
|
||||
|
||||
Buckets are collections of key/value pairs within the database. All keys in a
|
||||
bucket must be unique. You can create a bucket using the `DB.CreateBucket()`
|
||||
function:
|
||||
|
||||
```go
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b, err := tx.CreateBucket([]byte("MyBucket"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("create bucket: %s", err)
|
||||
}
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
You can also create a bucket only if it doesn't exist by using the
|
||||
`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this
|
||||
function for all your top-level buckets after you open your database so you can
|
||||
guarantee that they exist for future transactions.
|
||||
|
||||
To delete a bucket, simply call the `Tx.DeleteBucket()` function.
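For example, a minimal sketch (bucket names are placeholders) that creates a bucket on demand and later drops it:

```go
// Create the bucket on first use; later calls return the existing bucket.
db.Update(func(tx *bolt.Tx) error {
	_, err := tx.CreateBucketIfNotExists([]byte("MyBucket"))
	return err
})

// Drop the bucket and every key stored in it.
db.Update(func(tx *bolt.Tx) error {
	return tx.DeleteBucket([]byte("MyBucket"))
})
```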
|
||||
|
||||
|
||||
### Using key/value pairs
|
||||
|
||||
To save a key/value pair to a bucket, use the `Bucket.Put()` function:
|
||||
|
||||
```go
|
||||
db.Update(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
err := b.Put([]byte("answer"), []byte("42"))
|
||||
return err
|
||||
})
|
||||
```
|
||||
|
||||
This will set the value of the `"answer"` key to `"42"` in the `MyBucket`
|
||||
bucket. To retrieve this value, we can use the `Bucket.Get()` function:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
v := b.Get([]byte("answer"))
|
||||
fmt.Printf("The answer is: %s\n", v)
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
The `Get()` function does not return an error because its operation is
|
||||
guaranteed to work (unless there is some kind of system failure). If the key
|
||||
exists then it will return its byte slice value. If it doesn't exist then it
|
||||
will return `nil`. It's important to note that you can have a zero-length value
|
||||
set to a key which is different than the key not existing.
|
||||
|
||||
Use the `Bucket.Delete()` function to delete a key from the bucket.
|
||||
|
||||
Please note that values returned from `Get()` are only valid while the
|
||||
transaction is open. If you need to use a value outside of the transaction
|
||||
then you must use `copy()` to copy it to another byte slice.
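A short sketch of copying a value out of a read transaction (reusing the example bucket and key from above):

```go
var value []byte
db.View(func(tx *bolt.Tx) error {
	v := tx.Bucket([]byte("MyBucket")).Get([]byte("answer"))
	if v != nil {
		// The slice returned by Get is only valid inside this transaction,
		// so copy it before the transaction closes.
		value = make([]byte, len(v))
		copy(value, v)
	}
	return nil
})
```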
|
||||
|
||||
|
||||
### Autoincrementing integer for the bucket
|
||||
By using the `NextSequence()` function, you can let Bolt determine a sequence
|
||||
which can be used as the unique identifier for your key/value pairs. See the
|
||||
example below.
|
||||
|
||||
```go
|
||||
// CreateUser saves u to the store. The new user ID is set on u once the data is persisted.
|
||||
func (s *Store) CreateUser(u *User) error {
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
// Retrieve the users bucket.
|
||||
// This should be created when the DB is first opened.
|
||||
b := tx.Bucket([]byte("users"))
|
||||
|
||||
// Generate ID for the user.
|
||||
// This returns an error only if the Tx is closed or not writeable.
|
||||
// That can't happen in an Update() call so I ignore the error check.
|
||||
id, _ := b.NextSequence()
|
||||
u.ID = int(id)
|
||||
|
||||
// Marshal user data into bytes.
|
||||
buf, err := json.Marshal(u)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Persist bytes to users bucket.
|
||||
return b.Put(itob(u.ID), buf)
|
||||
})
|
||||
}
|
||||
|
||||
// itob returns an 8-byte big endian representation of v.
|
||||
func itob(v int) []byte {
|
||||
b := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(b, uint64(v))
|
||||
return b
|
||||
}
|
||||
|
||||
type User struct {
|
||||
ID int
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
### Iterating over keys
|
||||
|
||||
Bolt stores its keys in byte-sorted order within a bucket. This makes sequential
|
||||
iteration over these keys extremely fast. To iterate over keys we'll use a
|
||||
`Cursor`:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume bucket exists and has keys
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
|
||||
c := b.Cursor()
|
||||
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
The cursor allows you to move to a specific point in the list of keys and move
|
||||
forward or backward through the keys one at a time.
|
||||
|
||||
The following functions are available on the cursor:
|
||||
|
||||
```
|
||||
First() Move to the first key.
|
||||
Last() Move to the last key.
|
||||
Seek() Move to a specific key.
|
||||
Next() Move to the next key.
|
||||
Prev() Move to the previous key.
|
||||
```
|
||||
|
||||
Each of those functions has a return signature of `(key []byte, value []byte)`.
|
||||
When you have iterated to the end of the cursor then `Next()` will return a
|
||||
`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()`
|
||||
before calling `Next()` or `Prev()`. If you do not seek to a position then
|
||||
these functions will return a `nil` key.
|
||||
|
||||
During iteration, if the key is non-`nil` but the value is `nil`, that means
|
||||
the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to
|
||||
access the sub-bucket.
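A small sketch of that check during iteration (bucket names are illustrative):

```go
db.View(func(tx *bolt.Tx) error {
	b := tx.Bucket([]byte("MyBucket"))
	c := b.Cursor()

	for k, v := c.First(); k != nil; k, v = c.Next() {
		if v == nil {
			// A nil value means k refers to a nested bucket, not a plain key/value pair.
			nested := b.Bucket(k)
			fmt.Printf("%s is a nested bucket with %d keys\n", k, nested.Stats().KeyN)
			continue
		}
		fmt.Printf("key=%s, value=%s\n", k, v)
	}
	return nil
})
```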
|
||||
|
||||
|
||||
#### Prefix scans
|
||||
|
||||
To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume bucket exists and has keys
|
||||
c := tx.Bucket([]byte("MyBucket")).Cursor()
|
||||
|
||||
prefix := []byte("1234")
|
||||
for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
#### Range scans
|
||||
|
||||
Another common use case is scanning over a range such as a time range. If you
|
||||
use a sortable time encoding such as RFC3339 then you can query a specific
|
||||
date range like this:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume our events bucket exists and has RFC3339 encoded time keys.
|
||||
c := tx.Bucket([]byte("Events")).Cursor()
|
||||
|
||||
// Our time range spans the 90's decade.
|
||||
min := []byte("1990-01-01T00:00:00Z")
|
||||
max := []byte("2000-01-01T00:00:00Z")
|
||||
|
||||
// Iterate over the 90's.
|
||||
for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() {
|
||||
fmt.Printf("%s: %s\n", k, v)
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable.
|
||||
|
||||
|
||||
#### ForEach()
|
||||
|
||||
You can also use the function `ForEach()` if you know you'll be iterating over
|
||||
all the keys in a bucket:
|
||||
|
||||
```go
|
||||
db.View(func(tx *bolt.Tx) error {
|
||||
// Assume bucket exists and has keys
|
||||
b := tx.Bucket([]byte("MyBucket"))
|
||||
|
||||
b.ForEach(func(k, v []byte) error {
|
||||
fmt.Printf("key=%s, value=%s\n", k, v)
|
||||
return nil
|
||||
})
|
||||
return nil
|
||||
})
|
||||
```
|
||||
|
||||
|
||||
### Nested buckets
|
||||
|
||||
You can also store a bucket in a key to create nested buckets. The API is the
|
||||
same as the bucket management API on the `DB` object:
|
||||
|
||||
```go
|
||||
func (*Bucket) CreateBucket(key []byte) (*Bucket, error)
|
||||
func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error)
|
||||
func (*Bucket) DeleteBucket(key []byte) error
|
||||
```
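
For instance, a sketch that keeps per-user data in buckets nested under a top-level `Users` bucket (all names here are placeholders):

```go
db.Update(func(tx *bolt.Tx) error {
	users, err := tx.CreateBucketIfNotExists([]byte("Users"))
	if err != nil {
		return err
	}

	// Each user gets their own bucket nested inside "Users".
	alice, err := users.CreateBucketIfNotExists([]byte("alice"))
	if err != nil {
		return err
	}
	return alice.Put([]byte("email"), []byte("alice@example.com"))
})
```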
|
||||
|
||||
|
||||
### Database backups
|
||||
|
||||
Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()`
|
||||
function to write a consistent view of the database to a writer. If you call
|
||||
this from a read-only transaction, it will perform a hot backup and not block
|
||||
your other database reads and writes.
|
||||
|
||||
By default, it will use a regular file handle which will utilize the operating
|
||||
system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx)
|
||||
documentation for information about optimizing for larger-than-RAM datasets.
|
||||
|
||||
One common use case is to backup over HTTP so you can use tools like `cURL` to
|
||||
do database backups:
|
||||
|
||||
```go
|
||||
func BackupHandleFunc(w http.ResponseWriter, req *http.Request) {
|
||||
err := db.View(func(tx *bolt.Tx) error {
|
||||
w.Header().Set("Content-Type", "application/octet-stream")
|
||||
w.Header().Set("Content-Disposition", `attachment; filename="my.db"`)
|
||||
w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size())))
|
||||
_, err := tx.WriteTo(w)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), http.StatusInternalServerError)
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
Then you can backup using this command:
|
||||
|
||||
```sh
|
||||
$ curl http://localhost/backup > my.db
|
||||
```
|
||||
|
||||
Or you can open your browser to `http://localhost/backup` and it will download
|
||||
automatically.
|
||||
|
||||
If you want to backup to another file you can use the `Tx.CopyFile()` helper
|
||||
function.
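A minimal sketch (the destination path is an arbitrary example):

```go
err := db.View(func(tx *bolt.Tx) error {
	// Writes a consistent snapshot of the database to the given file.
	return tx.CopyFile("/tmp/my.db.backup", 0600)
})
if err != nil {
	log.Fatal(err)
}
```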
|
||||
|
||||
|
||||
### Statistics
|
||||
|
||||
The database keeps a running count of many of the internal operations it
|
||||
performs so you can better understand what's going on. By grabbing a snapshot
|
||||
of these stats at two points in time we can see what operations were performed
|
||||
in that time range.
|
||||
|
||||
For example, we could start a goroutine to log stats every 10 seconds:
|
||||
|
||||
```go
|
||||
go func() {
|
||||
// Grab the initial stats.
|
||||
prev := db.Stats()
|
||||
|
||||
for {
|
||||
// Wait for 10s.
|
||||
time.Sleep(10 * time.Second)
|
||||
|
||||
// Grab the current stats and diff them.
|
||||
stats := db.Stats()
|
||||
diff := stats.Sub(&prev)
|
||||
|
||||
// Encode stats to JSON and print to STDERR.
|
||||
json.NewEncoder(os.Stderr).Encode(diff)
|
||||
|
||||
// Save stats for the next loop.
|
||||
prev = stats
|
||||
}
|
||||
}()
|
||||
```
|
||||
|
||||
It's also useful to pipe these stats to a service such as statsd for monitoring
|
||||
or to provide an HTTP endpoint that will perform a fixed-length sample.
|
||||
|
||||
|
||||
### Read-Only Mode
|
||||
|
||||
Sometimes it is useful to create a shared, read-only Bolt database. To do this,
|
||||
set the `Options.ReadOnly` flag when opening your database. Read-only mode
|
||||
uses a shared lock to allow multiple processes to read from the database but
|
||||
it will block any processes from opening the database in read-write mode.
|
||||
|
||||
```go
|
||||
db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
```
|
||||
|
||||
### Mobile Use (iOS/Android)
|
||||
|
||||
Bolt is able to run on mobile devices by leveraging the binding feature of the
|
||||
[gomobile](https://github.com/golang/mobile) tool. Create a struct that will
|
||||
contain your database logic and a reference to a `*bolt.DB` with an initializing
|
||||
constructor that takes in a filepath where the database file will be stored.
|
||||
Neither Android nor iOS require extra permissions or cleanup from using this method.
|
||||
|
||||
```go
|
||||
func NewBoltDB(filepath string) *BoltDB {
|
||||
db, err := bolt.Open(filepath+"/demo.db", 0600, nil)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
return &BoltDB{db}
|
||||
}
|
||||
|
||||
type BoltDB struct {
|
||||
db *bolt.DB
|
||||
...
|
||||
}
|
||||
|
||||
func (b *BoltDB) Path() string {
|
||||
return b.db.Path()
|
||||
}
|
||||
|
||||
func (b *BoltDB) Close() {
|
||||
b.db.Close()
|
||||
}
|
||||
```
|
||||
|
||||
Database logic should be defined as methods on this wrapper struct.
|
||||
|
||||
To initialize this struct from the native language (both platforms now sync
|
||||
their local storage to the cloud. These snippets disable that functionality for the
|
||||
database file):
|
||||
|
||||
#### Android
|
||||
|
||||
```java
|
||||
String path;
|
||||
if (android.os.Build.VERSION.SDK_INT >=android.os.Build.VERSION_CODES.LOLLIPOP){
|
||||
path = getNoBackupFilesDir().getAbsolutePath();
|
||||
} else{
|
||||
path = getFilesDir().getAbsolutePath();
|
||||
}
|
||||
Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path)
|
||||
```
|
||||
|
||||
#### iOS
|
||||
|
||||
```objc
|
||||
- (void)demo {
|
||||
NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory,
|
||||
NSUserDomainMask,
|
||||
YES) objectAtIndex:0];
|
||||
GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path);
|
||||
[self addSkipBackupAttributeToItemAtPath:demo.path];
|
||||
//Some DB Logic would go here
|
||||
[demo close];
|
||||
}
|
||||
|
||||
- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString
|
||||
{
|
||||
NSURL* URL= [NSURL fileURLWithPath: filePathString];
|
||||
assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]);
|
||||
|
||||
NSError *error = nil;
|
||||
BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES]
|
||||
forKey: NSURLIsExcludedFromBackupKey error: &error];
|
||||
if(!success){
|
||||
NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error);
|
||||
}
|
||||
return success;
|
||||
}
|
||||
|
||||
```
|
||||
|
||||
## Resources
|
||||
|
||||
For more information on getting started with Bolt, check out the following articles:
|
||||
|
||||
* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch).
|
||||
* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville
|
||||
|
||||
|
||||
## Comparison with other databases
|
||||
|
||||
### Postgres, MySQL, & other relational databases
|
||||
|
||||
Relational databases structure data into rows and are only accessible through
|
||||
the use of SQL. This approach provides flexibility in how you store and query
|
||||
your data but also incurs overhead in parsing and planning SQL statements. Bolt
|
||||
accesses all data by a byte slice key. This makes Bolt fast to read and write
|
||||
data by key but provides no built-in support for joining values together.
|
||||
|
||||
Most relational databases (with the exception of SQLite) are standalone servers
|
||||
that run separately from your application. This gives your systems
|
||||
flexibility to connect multiple application servers to a single database
|
||||
server but also adds overhead in serializing and transporting data over the
|
||||
network. Bolt runs as a library included in your application so all data access
|
||||
has to go through your application's process. This brings data closer to your
|
||||
application but limits multi-process access to the data.
|
||||
|
||||
|
||||
### LevelDB, RocksDB
|
||||
|
||||
LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that
|
||||
they are libraries bundled into the application, however, their underlying
|
||||
structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes
|
||||
random writes by using a write ahead log and multi-tiered, sorted files called
|
||||
SSTables. Bolt uses a B+tree internally and only a single file. Both approaches
|
||||
have trade-offs.
|
||||
|
||||
If you require a high random write throughput (>10,000 w/sec) or you need to use
|
||||
spinning disks then LevelDB could be a good choice. If your application is
|
||||
read-heavy or does a lot of range scans then Bolt could be a good choice.
|
||||
|
||||
One other important consideration is that LevelDB does not have transactions.
|
||||
It supports batch writing of key/values pairs and it supports read snapshots
|
||||
but it will not give you the ability to do a compare-and-swap operation safely.
|
||||
Bolt supports fully serializable ACID transactions.
|
||||
|
||||
|
||||
### LMDB
|
||||
|
||||
Bolt was originally a port of LMDB so it is architecturally similar. Both use
|
||||
a B+tree, have ACID semantics with fully serializable transactions, and support
|
||||
lock-free MVCC using a single writer and multiple readers.
|
||||
|
||||
The two projects have somewhat diverged. LMDB heavily focuses on raw performance
|
||||
while Bolt has focused on simplicity and ease of use. For example, LMDB allows
|
||||
several unsafe actions such as direct writes for the sake of performance. Bolt
|
||||
opts to disallow actions which can leave the database in a corrupted state. The
|
||||
only exception to this in Bolt is `DB.NoSync`.
|
||||
|
||||
There are also a few differences in API. LMDB requires a maximum mmap size when
|
||||
opening an `mdb_env` whereas Bolt will handle incremental mmap resizing
|
||||
automatically. LMDB overloads the getter and setter functions with multiple
|
||||
flags whereas Bolt splits these specialized cases into their own functions.
|
||||
|
||||
|
||||
## Caveats & Limitations
|
||||
|
||||
It's important to pick the right tool for the job and Bolt is no exception.
|
||||
Here are a few things to note when evaluating and using Bolt:
|
||||
|
||||
* Bolt is good for read intensive workloads. Sequential write performance is
|
||||
also fast but random writes can be slow. You can use `DB.Batch()` or add a
|
||||
write-ahead log to help mitigate this issue.
|
||||
|
||||
* Bolt uses a B+tree internally so there can be a lot of random page access.
|
||||
SSDs provide a significant performance boost over spinning disks.
|
||||
|
||||
* Try to avoid long running read transactions. Bolt uses copy-on-write so
|
||||
old pages cannot be reclaimed while an old transaction is using them.
|
||||
|
||||
* Byte slices returned from Bolt are only valid during a transaction. Once the
|
||||
transaction has been committed or rolled back then the memory they point to
|
||||
can be reused by a new page or can be unmapped from virtual memory and you'll
|
||||
see an `unexpected fault address` panic when accessing it.
|
||||
|
||||
* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for
|
||||
buckets that have random inserts will cause your database to have very poor
|
||||
page utilization.
|
||||
|
||||
* Use larger buckets in general. Smaller buckets cause poor page utilization
|
||||
once they become larger than the page size (typically 4KB).
|
||||
|
||||
* Bulk loading a lot of random writes into a new bucket can be slow as the
|
||||
page will not split until the transaction is committed. Randomly inserting
|
||||
more than 100,000 key/value pairs into a single new bucket in a single
|
||||
transaction is not advised.
|
||||
|
||||
* Bolt uses a memory-mapped file so the underlying operating system handles the
|
||||
caching of the data. Typically, the OS will cache as much of the file as it
|
||||
can in memory and will release memory as needed to other processes. This means
|
||||
that Bolt can show very high memory usage when working with large databases.
|
||||
However, this is expected and the OS will release memory as needed. Bolt can
|
||||
handle databases much larger than the available physical RAM, provided its
|
||||
memory-map fits in the process virtual address space. It may be problematic
|
||||
on 32-bit systems.
|
||||
|
||||
* The data structures in the Bolt database are memory mapped so the data file
|
||||
will be endian specific. This means that you cannot copy a Bolt file from a
|
||||
little endian machine to a big endian machine and have it work. For most
|
||||
users this is not a concern since most modern CPUs are little endian.
|
||||
|
||||
* Because of the way pages are laid out on disk, Bolt cannot truncate data files
|
||||
and return free pages back to the disk. Instead, Bolt maintains a free list
|
||||
of unused pages within its data file. These free pages can be reused by later
|
||||
transactions. This works well for many use cases as databases generally tend
|
||||
to grow. However, it's important to note that deleting large chunks of data
|
||||
will not allow you to reclaim that space on disk.
|
||||
|
||||
For more information on page allocation, [see this comment][page-allocation].
|
||||
|
||||
[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638
|
||||
|
||||
|
||||
## Reading the Source
|
||||
|
||||
Bolt is a relatively small code base (<3KLOC) for an embedded, serializable,
|
||||
transactional key/value database so it can be a good starting point for people
|
||||
interested in how databases work.
|
||||
|
||||
The best places to start are the main entry points into Bolt:
|
||||
|
||||
- `Open()` - Initializes the reference to the database. It's responsible for
|
||||
creating the database if it doesn't exist, obtaining an exclusive lock on the
|
||||
file, reading the meta pages, & memory-mapping the file.
|
||||
|
||||
- `DB.Begin()` - Starts a read-only or read-write transaction depending on the
|
||||
value of the `writable` argument. This requires briefly obtaining the "meta"
|
||||
lock to keep track of open transactions. Only one read-write transaction can
|
||||
exist at a time so the "rwlock" is acquired during the life of a read-write
|
||||
transaction.
|
||||
|
||||
- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the
|
||||
arguments, a cursor is used to traverse the B+tree to the page and position
|
||||
where the key & value will be written. Once the position is found, the bucket
|
||||
materializes the underlying page and the page's parent pages into memory as
|
||||
"nodes". These nodes are where mutations occur during read-write transactions.
|
||||
These changes get flushed to disk during commit.
|
||||
|
||||
- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor
  to move to the page & position of a key/value pair. During a read-only
  transaction, the key and value data is returned as a direct reference to the
  underlying mmap file so there's no allocation overhead. For read-write
  transactions, this data may reference the mmap file or one of the in-memory
  node values.

- `Cursor` - This object is simply for traversing the B+tree of on-disk pages
  or in-memory nodes. It can seek to a specific key, move to the first or last
  value, or it can move forward or backward. The cursor handles the movement up
  and down the B+tree transparently to the end user.

- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages
  into pages to be written to disk. Writing to disk then occurs in two phases.
  First, the dirty pages are written to disk and an `fsync()` occurs. Second, a
  new meta page with an incremented transaction ID is written and another
  `fsync()` occurs. This two-phase write ensures that partially written data
  pages are ignored in the event of a crash since the meta page pointing to them
  is never written. Partially written meta pages are invalidated because they
  are written with a checksum.

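The entry points above map onto a very small amount of user-facing code. The
sketch below (bucket and key names are arbitrary) shows roughly where each one
comes into play; note that `DB.Update()` and `DB.View()` are convenience
wrappers that call `DB.Begin()` and then `Tx.Commit()` or `Tx.Rollback()` on
your behalf:

```go
package main

import (
	"fmt"
	"log"

	"github.com/boltdb/bolt"
)

func main() {
	// Open(): creates the file if needed, takes an exclusive file lock,
	// reads the meta pages, and memory-maps the data file.
	db, err := bolt.Open("example.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()

	// DB.Update() wraps DB.Begin(true) + Tx.Commit(); only one read-write
	// transaction can be open at a time.
	if err := db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
		if err != nil {
			return err
		}
		// Bucket.Put(): a cursor finds the insert position and the touched
		// pages are materialized as in-memory "nodes" until commit.
		return b.Put([]byte("foo"), []byte("bar"))
	}); err != nil {
		log.Fatal(err)
	}

	// DB.View() wraps DB.Begin(false) for a read-only transaction.
	if err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket([]byte("widgets"))

		// Bucket.Get(): in a read-only transaction the returned slice points
		// directly into the mmap'd file, so copy it if it must outlive tx.
		fmt.Printf("foo = %s\n", b.Get([]byte("foo")))

		// Cursor: walks the B+tree; First/Next/Prev/Last/Seek are the
		// movement primitives.
		c := b.Cursor()
		for k, v := c.First(); k != nil; k, v = c.Next() {
			fmt.Printf("%s = %s\n", k, v)
		}
		return nil
	}); err != nil {
		log.Fatal(err)
	}
}
```
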
If you have additional notes that could be helpful for others, please submit
them via pull request.


## Other Projects Using Bolt

Below is a list of public, open source projects that use Bolt:

* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside.
* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb.
* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics.
* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects.
* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday.
* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations.
* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite.
* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin".
* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka.
* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and API for ipxed.
* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt.
* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site.
* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL database, using Bolt as optional storage.
* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters.
* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend.
* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend.
* [tentacool](https://github.com/optiflows/tentacool) - REST API server to manage system stuff (IP, DNS, Gateway...) on a linux server.
* [SkyDB](https://github.com/skydb/sky) - Behavioral analytics database.
* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read.
* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics.
* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data.
* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system.
* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware.
* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent and offers a JSON-over-HTTP API, ISO 8601 duration notation, and dependent jobs.
* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems.
* [stow](https://github.com/djherbis/stow) - a persistence manager for objects backed by boltdb.
* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining simple tx and key scans.
* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets.
* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to the [RequestBin](http://requestb.in/) service.
* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
* [Storm](https://github.com/asdine/storm) - A simple ORM around BoltDB.
* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.

If you are using Bolt in a project please send a pull request to add it to the list.