change from vendor to dep

Author: Cadey Ratio
Date: 2017-10-06 08:29:20 -07:00
Parent: 5b4885cf6c
Commit: 50c1deaa7d
GPG Key ID: D607EE27C2E7F89A (no known key found for this signature in the database)
2468 changed files with 1080094 additions and 44874 deletions

Gopkg.lock (generated, new file, 261 lines)

@@ -0,0 +1,261 @@
# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'.
[[projects]]
branch = "master"
name = "github.com/Xe/gopreload"
packages = ["."]
revision = "a00a8beb369cafd88bb7b32f31fc4ff3219c3565"
[[projects]]
name = "github.com/Xe/ln"
packages = [".","ex"]
revision = "466e05b2ef3e48ce08a367b6aaac09ee29a124e5"
version = "v0.1"
[[projects]]
branch = "master"
name = "github.com/Xe/uuid"
packages = ["."]
revision = "62b230097e9c9534ca2074782b25d738c4b68964"
[[projects]]
branch = "master"
name = "github.com/Xe/x"
packages = ["tools/svc/credentials/jwt"]
revision = "2e7ad18cbac67114f92ca1e1d1dc780ae21ea61f"
[[projects]]
branch = "master"
name = "github.com/aclements/go-moremath"
packages = ["mathx"]
revision = "033754ab1fee508c9f98f2785eec2365964e0b05"
[[projects]]
branch = "master"
name = "github.com/alecthomas/template"
packages = [".","parse"]
revision = "a0175ee3bccc567396460bf5acd36800cb10c49c"
[[projects]]
branch = "master"
name = "github.com/alecthomas/units"
packages = ["."]
revision = "2efee857e7cfd4f3d0138cc3cbb1b4966962b93a"
[[projects]]
name = "github.com/asdine/storm"
packages = [".","codec","codec/json","index","internal","q"]
revision = "255212403bcca439778718edf5e2d3d50744eca3"
version = "v1.1.0"
[[projects]]
name = "github.com/boltdb/bolt"
packages = ["."]
revision = "2f1ce7a837dcb8da3ec595b1dac9d0632f0f99e8"
version = "v1.3.1"
[[projects]]
branch = "master"
name = "github.com/brandur/simplebox"
packages = ["."]
revision = "84e9865bb03ad38c464043bf5382ce8c68ca5f0c"
[[projects]]
name = "github.com/caarlos0/env"
packages = ["."]
revision = "0cf029d5748c52beb2c9d20c81880cb4bdf8f788"
version = "v3.0"
[[projects]]
branch = "master"
name = "github.com/dgryski/go-failure"
packages = ["."]
revision = "4963dbd58fd03ebc6672b18f9237a9045e6ef479"
[[projects]]
branch = "master"
name = "github.com/dgryski/go-onlinestats"
packages = ["."]
revision = "1c7d1946876822b4a3a4b11b598e3aad1d05265e"
[[projects]]
branch = "master"
name = "github.com/dickeyxxx/netrc"
packages = ["."]
revision = "3acf1b3de25d89c7688c63bb45f6b07f566555ec"
[[projects]]
branch = "master"
name = "github.com/facebookgo/flagenv"
packages = ["."]
revision = "fcd59fca7456889be7f2ad4515b7612fd6acef31"
[[projects]]
branch = "master"
name = "github.com/golang/protobuf"
packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"]
revision = "130e6b02ab059e7b717a096f397c5b60111cae74"
[[projects]]
name = "github.com/google/gops"
packages = ["agent","internal","signal"]
revision = "57e77c5c37da1f4e1af49f9d1fe760f146c1579e"
version = "v0.3.2"
[[projects]]
name = "github.com/joho/godotenv"
packages = [".","autoload"]
revision = "a79fa1e548e2c689c241d10173efd51e5d689d5b"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "github.com/jtolds/qod"
packages = ["."]
revision = "3abb44dfc7ba8b5cdfdb634786f57e78c7004e1c"
[[projects]]
branch = "master"
name = "github.com/kardianos/osext"
packages = ["."]
revision = "ae77be60afb1dcacde03767a8c37337fad28ac14"
[[projects]]
branch = "master"
name = "github.com/kr/pretty"
packages = ["."]
revision = "cfb55aafdaf3ec08f0db22699ab822c50091b1c4"
[[projects]]
branch = "master"
name = "github.com/kr/text"
packages = ["."]
revision = "7cafcd837844e784b526369c9bce262804aebc60"
[[projects]]
branch = "master"
name = "github.com/magefile/mage"
packages = ["mg"]
revision = "ca568f7054407930250eb570b76ee41c27c130ab"
[[projects]]
name = "github.com/mattn/go-runewidth"
packages = ["."]
revision = "9e777a8366cce605130a531d2cd6363d07ad7317"
version = "v0.0.2"
[[projects]]
branch = "master"
name = "github.com/mtneug/pkg"
packages = ["ulid"]
revision = "b270c2c35fc775243f87c58cf3f6969c5d9369d6"
[[projects]]
name = "github.com/oklog/ulid"
packages = ["."]
revision = "d311cb43c92434ec4072dfbbda3400741d0a6337"
version = "v0.3.0"
[[projects]]
branch = "master"
name = "github.com/olekukonko/tablewriter"
packages = ["."]
revision = "a7a4c189eb47ed33ce7b35f2880070a0c82a67d4"
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
revision = "645ef00459ed84a119197bfb8d8205042c6df63d"
version = "v0.8.0"
[[projects]]
branch = "master"
name = "github.com/streamrail/concurrent-map"
packages = ["."]
revision = "8bf1e9bacbf65b10c81d0f4314cf2b1ebef728b5"
[[projects]]
branch = "master"
name = "github.com/templexxx/cpufeat"
packages = ["."]
revision = "3794dfbfb04749f896b521032f69383f24c3687e"
[[projects]]
name = "github.com/templexxx/reedsolomon"
packages = ["."]
revision = "5e06b81a1c7628d9c8d4fb7c3c4e401e37db39b4"
version = "0.1.1"
[[projects]]
name = "github.com/templexxx/xor"
packages = ["."]
revision = "0af8e873c554da75f37f2049cdffda804533d44c"
version = "0.1.2"
[[projects]]
name = "github.com/xtaci/kcp-go"
packages = ["."]
revision = "44c3d76a6b5cc9e3687f829078a52f372928e776"
version = "v3.19"
[[projects]]
name = "github.com/xtaci/smux"
packages = ["."]
revision = "ebec7ef2574b42a7088cd7751176483e0a27d458"
version = "v1.0.6"
[[projects]]
name = "go.uber.org/atomic"
packages = ["."]
revision = "4e336646b2ef9fc6e47be8e21594178f98e5ebcf"
version = "v1.2.0"
[[projects]]
branch = "master"
name = "golang.org/x/crypto"
packages = ["acme","acme/autocert","blowfish","cast5","nacl/secretbox","pbkdf2","poly1305","salsa20","salsa20/salsa","tea","twofish","xtea"]
revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
[[projects]]
branch = "master"
name = "golang.org/x/net"
packages = ["bpf","context","http2","http2/hpack","idna","internal/iana","internal/socket","internal/timeseries","ipv4","lex/httplex","trace"]
revision = "a04bdaca5b32abe1c069418fb7088ae607de5bd0"
[[projects]]
branch = "master"
name = "golang.org/x/sys"
packages = ["unix"]
revision = "314a259e304ff91bd6985da2a7149bbf91237993"
[[projects]]
branch = "master"
name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"]
revision = "825fc78a2fd6fa0a5447e300189e3219e05e1f25"
[[projects]]
branch = "master"
name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"]
revision = "f676e0f3ac6395ff1a529ae59a6670878a8371a6"
[[projects]]
name = "google.golang.org/grpc"
packages = [".","codes","connectivity","credentials","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","stats","status","tap","transport"]
revision = "f92cdcd7dcdc69e81b2d7b338479a19a8723cfa3"
version = "v1.6.0"
[[projects]]
name = "gopkg.in/alecthomas/kingpin.v2"
packages = ["."]
revision = "1087e65c9441605df944fb12c33f0fe7072d18ca"
version = "v2.2.5"
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "e36c7dd8cc83fc16b10af5a8e3cfd768a3da534480cff4e0384ca1f3b4d050f5"
solver-name = "gps-cdcl"
solver-version = 1
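Gopkg.lock is the machine-generated half of dep's state: each [[projects]] stanza pins a dependency to an exact revision so that dep ensure can reproduce the same vendor tree. For illustration only, here is a minimal sketch of reading that data from Go; it assumes the github.com/BurntSushi/toml package, which is not part of this commit, and is not code from this repository.

// lockdump.go: minimal sketch that lists the revisions pinned in Gopkg.lock.
// Assumes github.com/BurntSushi/toml; illustrative only, not part of the commit.
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type lockFile struct {
	Projects []struct {
		Name     string   `toml:"name"`
		Branch   string   `toml:"branch"`
		Version  string   `toml:"version"`
		Revision string   `toml:"revision"`
		Packages []string `toml:"packages"`
	} `toml:"projects"`
}

func main() {
	var lock lockFile
	if _, err := toml.DecodeFile("Gopkg.lock", &lock); err != nil {
		log.Fatal(err)
	}
	for _, p := range lock.Projects {
		fmt.Printf("%s pinned to %s\n", p.Name, p.Revision)
	}
}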

Gopkg.toml (new file, 130 lines)

@@ -0,0 +1,130 @@
# Gopkg.toml example
#
# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md
# for detailed Gopkg.toml documentation.
#
# required = ["github.com/user/thing/cmd/thing"]
# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"]
#
# [[constraint]]
# name = "github.com/user/project"
# version = "1.0.0"
#
# [[constraint]]
# name = "github.com/user/project2"
# branch = "dev"
# source = "github.com/myfork/project2"
#
# [[override]]
# name = "github.com/x/y"
# version = "2.4.0"
[[constraint]]
branch = "master"
name = "github.com/Xe/gopreload"
[[constraint]]
name = "github.com/Xe/ln"
version = "0.1.0"
[[constraint]]
branch = "master"
name = "github.com/Xe/uuid"
[[constraint]]
branch = "master"
name = "github.com/Xe/x"
[[constraint]]
name = "github.com/asdine/storm"
version = "1.1.0"
[[constraint]]
branch = "master"
name = "github.com/brandur/simplebox"
[[constraint]]
name = "github.com/caarlos0/env"
version = "3.0.0"
[[constraint]]
branch = "master"
name = "github.com/dgryski/go-failure"
[[constraint]]
branch = "master"
name = "github.com/dickeyxxx/netrc"
[[constraint]]
branch = "master"
name = "github.com/facebookgo/flagenv"
[[constraint]]
branch = "master"
name = "github.com/golang/protobuf"
[[constraint]]
name = "github.com/google/gops"
version = "0.3.2"
[[constraint]]
name = "github.com/joho/godotenv"
version = "1.2.0"
[[constraint]]
branch = "master"
name = "github.com/jtolds/qod"
[[constraint]]
branch = "master"
name = "github.com/kr/pretty"
[[constraint]]
branch = "master"
name = "github.com/magefile/mage"
[[constraint]]
branch = "master"
name = "github.com/mtneug/pkg"
[[constraint]]
branch = "master"
name = "github.com/olekukonko/tablewriter"
[[constraint]]
name = "github.com/pkg/errors"
version = "0.8.0"
[[constraint]]
branch = "master"
name = "github.com/streamrail/concurrent-map"
[[constraint]]
name = "github.com/xtaci/kcp-go"
version = "3.19.0"
[[constraint]]
name = "github.com/xtaci/smux"
version = "1.0.6"
[[constraint]]
name = "go.uber.org/atomic"
version = "1.2.0"
[[constraint]]
branch = "master"
name = "golang.org/x/crypto"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.6.0"
[[constraint]]
name = "gopkg.in/alecthomas/kingpin.v2"
version = "2.2.5"

Deleted file (332 lines removed):
@@ -1,332 +0,0 @@
417badecf1ab14d0d6e38ad82397da2a59e2f6ca github.com/GoRethink/gorethink
9b48ece7fc373043054858f8c0d362665e866004 github.com/Sirupsen/logrus
62b230097e9c9534ca2074782b25d738c4b68964 (dirty) github.com/Xe/uuid
38b46760280b5500edd530aa39a8075bf22f9630 github.com/Yawning/bulb
b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3 github.com/cenk/backoff
b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3 github.com/cenkalti/backoff
fcd59fca7456889be7f2ad4515b7612fd6acef31 github.com/facebookgo/flagenv
8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/proto
e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/hailocab/go-hostpool
d1caa6c97c9fc1cc9e83bbe34d0603f9ff0ce8bd github.com/hashicorp/yamux
4ed13390c0acd2ff4e371e64d8b97c8954138243 github.com/joho/godotenv
4ed13390c0acd2ff4e371e64d8b97c8954138243 github.com/joho/godotenv/autoload
8b5a689ed69b1c7cd1e3595276fc2a352d7818e0 github.com/koding/logging
1627eaec269965440f742a25a627910195ad1c7a github.com/sycamoreone/orc/tor
38b46760280b5500edd530aa39a8075bf22f9630 github.com/yawning/bulb/utils
38b46760280b5500edd530aa39a8075bf22f9630 github.com/yawning/bulb/utils/pkcs1
b8a2a83acfe6e6770b75de42d5ff4c67596675c0 golang.org/x/crypto/pbkdf2
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/proxy
6e328e67893eb46323ad06f0e92cb9536babbabc gopkg.in/fatih/pool.v2
016a1d3b4d15951ab2e39bd3596718ba94d298ba gopkg.in/gorethink/gorethink.v2/encoding
016a1d3b4d15951ab2e39bd3596718ba94d298ba gopkg.in/gorethink/gorethink.v2/ql2
016a1d3b4d15951ab2e39bd3596718ba94d298ba gopkg.in/gorethink/gorethink.v2/types
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/acme
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/acme/autocert
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/context
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/context/ctxhttp
e6ac2fc51e89a3249e82157fa0bb7a18ef9dd5bb github.com/kr/pretty
bb797dc4fb8320488f47bf11de07a733d7233e1f github.com/kr/text
3b8db5e93c4c02efbc313e17b2e796b0914a01fb go.uber.org/atomic
84e9865bb03ad38c464043bf5382ce8c68ca5f0c github.com/brandur/simplebox
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/nacl/secretbox
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/poly1305
f6b343c37ca80bfa8ea539da67a0b621f84fab1d golang.org/x/crypto/salsa20/salsa
8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/jsonpb
8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/proto
8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/protoc-gen-go/descriptor
cfee3c5f91d8b8b54b216781e246443bb73b1a8e github.com/grpc-ecosystem/grpc-gateway/runtime
cfee3c5f91d8b8b54b216781e246443bb73b1a8e github.com/grpc-ecosystem/grpc-gateway/runtime/internal
cfee3c5f91d8b8b54b216781e246443bb73b1a8e github.com/grpc-ecosystem/grpc-gateway/third_party/googleapis/google/api
cfee3c5f91d8b8b54b216781e246443bb73b1a8e github.com/grpc-ecosystem/grpc-gateway/utilities
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/context
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/http2
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/http2/hpack
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/idna
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/internal/timeseries
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/lex/httplex
45e771701b814666a7eb299e6c7a57d0b1799e91 golang.org/x/net/trace
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/codes
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/credentials
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/grpclog
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/internal
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/metadata
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/naming
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/peer
0e6ec3a4501ee9ee2d023abe92e436fd04ed4081 google.golang.org/grpc/transport
8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/proto
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/context
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/http2
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/http2/hpack
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/idna
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/internal/timeseries
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/lex/httplex
f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/trace
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/codes
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/credentials
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/grpclog
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/internal
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/metadata
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/naming
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/peer
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/stats
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/tap
50955793b0183f9de69bd78e2ec251cf20aab121 google.golang.org/grpc/transport
49bd2f58881c34d534aa97bd64bdbdf37be0df91 github.com/Xe/ln
09cded8978dc9e80714c4d85b0322337b0a1e5e0 github.com/klauspost/cpuid
5abf0ee302ccf4834e84f63ff74eca3e8b88e4e2 github.com/klauspost/reedsolomon
b270c2c35fc775243f87c58cf3f6969c5d9369d6 github.com/mtneug/pkg/ulid
a59d62b5095aa2c7b1d220f880b0d0a4c6df3e51 github.com/oklog/ulid
ff09b135c25aae272398c51a07235b90a75aa4f0 github.com/pkg/errors
0423ef7410efae0a2b792e34bc23b01aa935e273 github.com/xtaci/kcp-go
a1a5df8f92af764f378f07d6a3dd8eb3f7aa190a github.com/xtaci/smux
459e26527287adbc2adcc5d0d49abff9a5f315a7 golang.org/x/crypto/blowfish
459e26527287adbc2adcc5d0d49abff9a5f315a7 golang.org/x/crypto/cast5
459e26527287adbc2adcc5d0d49abff9a5f315a7 golang.org/x/crypto/pbkdf2
459e26527287adbc2adcc5d0d49abff9a5f315a7 golang.org/x/crypto/salsa20
459e26527287adbc2adcc5d0d49abff9a5f315a7 golang.org/x/crypto/salsa20/salsa
459e26527287adbc2adcc5d0d49abff9a5f315a7 golang.org/x/crypto/tea
459e26527287adbc2adcc5d0d49abff9a5f315a7 golang.org/x/crypto/twofish
459e26527287adbc2adcc5d0d49abff9a5f315a7 golang.org/x/crypto/xtea
6c23252515492caf9b228a9d5cabcdbde29f7f82 golang.org/x/net/bpf
6c23252515492caf9b228a9d5cabcdbde29f7f82 golang.org/x/net/internal/iana
6c23252515492caf9b228a9d5cabcdbde29f7f82 golang.org/x/net/internal/netreflect
6c23252515492caf9b228a9d5cabcdbde29f7f82 golang.org/x/net/ipv4
8bf1e9bacbf65b10c81d0f4314cf2b1ebef728b5 github.com/streamrail/concurrent-map
a00a8beb369cafd88bb7b32f31fc4ff3219c3565 github.com/Xe/gopreload
033754ab1fee508c9f98f2785eec2365964e0b05 github.com/aclements/go-moremath/mathx
4963dbd58fd03ebc6672b18f9237a9045e6ef479 github.com/dgryski/go-failure
91a8fc22ba247b57c243ab90b49049fb734c47c3 github.com/dgryski/go-onlinestats
49bd2f58881c34d534aa97bd64bdbdf37be0df91 (dirty) github.com/Xe/ln
ff09b135c25aae272398c51a07235b90a75aa4f0 github.com/pkg/errors
f759b797c0ff6b2c514202198fe5e8ba90094c14 github.com/Xe/ln
ff09b135c25aae272398c51a07235b90a75aa4f0 github.com/pkg/errors
62f833fc9f6c4d3223bdb37bd0c2f8951bed8596 github.com/google/gops/agent
62f833fc9f6c4d3223bdb37bd0c2f8951bed8596 github.com/google/gops/internal
62f833fc9f6c4d3223bdb37bd0c2f8951bed8596 github.com/google/gops/signal
c2c54e542fb797ad986b31721e1baedf214ca413 github.com/kardianos/osext
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/codec
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/codec/json
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/index
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/internal
e8518cc1200f320e508639491f9390b9c7c37970 github.com/asdine/storm/q
e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd github.com/boltdb/bolt
9474f19b515f52326c7d197d2d097caa7fc7485e github.com/caarlos0/env
f6c17b524822278a87e3b3bd809fec33b51f5b46 github.com/emirpasic/gods/containers
f6c17b524822278a87e3b3bd809fec33b51f5b46 github.com/emirpasic/gods/trees
f6c17b524822278a87e3b3bd809fec33b51f5b46 github.com/emirpasic/gods/trees/redblacktree
f6c17b524822278a87e3b3bd809fec33b51f5b46 github.com/emirpasic/gods/utils
18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8 github.com/golang/protobuf/proto
18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8 github.com/golang/protobuf/ptypes/any
411e09b969b1170a9f0c467558eb4c4c110d9c77 google.golang.org/genproto/googleapis/rpc/status
0eb507a2ca07f13baf499f89d66cc566bf644643 (dirty) google.golang.org/grpc/codes
0eb507a2ca07f13baf499f89d66cc566bf644643 (dirty) google.golang.org/grpc/status
a0175ee3bccc567396460bf5acd36800cb10c49c github.com/alecthomas/template
a0175ee3bccc567396460bf5acd36800cb10c49c github.com/alecthomas/template/parse
2efee857e7cfd4f3d0138cc3cbb1b4966962b93a github.com/alecthomas/units
3acf1b3de25d89c7688c63bb45f6b07f566555ec github.com/dickeyxxx/netrc
7f0871f2e17818990e4eed73f9b5c2f429501228 gopkg.in/alecthomas/kingpin.v2
4d65901933bb7bed40783d9b9f6ae2ea2b829889 (dirty) github.com/Xe/x/tools/svc/credentials/jwt
da118f7b8e5954f39d0d2130ab35d4bf0e3cb344 golang.org/x/net/context
0eb507a2ca07f13baf499f89d66cc566bf644643 (dirty) google.golang.org/grpc/credentials
737072b4e32b7a5018b4a7125da8d12de90e8045 github.com/mattn/go-runewidth
44e365d423f4f06769182abfeeae2b91be9d529b github.com/olekukonko/tablewriter
4e1c5567d7c2dd59fa4c7c83d34c2f3528b025d6 github.com/oxtoacart/bpool
a3b3ca73af22dd09dfac218f586a8f42c681298d github.com/Xe/ln
c605e284fe17294bda444b34710735b29d1a9d90 github.com/pkg/errors
3abb44dfc7ba8b5cdfdb634786f57e78c7004e1c github.com/jtolds/qod
8351a756f30f1297fe94bbf4b767ec589c6ea6d0 golang.org/x/net/context
8351a756f30f1297fe94bbf4b767ec589c6ea6d0 golang.org/x/net/context/ctxhttp
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/proto
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/any
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/duration
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/timestamp
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/context
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/http2
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/http2/hpack
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/idna
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/internal/timeseries
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/lex/httplex
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/trace
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/secure/bidirule
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/transform
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/unicode/bidi
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/unicode/norm
1e559d0a00eef8a9a43151db4665280bd8dd5886 google.golang.org/genproto/googleapis/rpc/status
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/balancer
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/codes
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/connectivity
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/credentials
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/grpclb/grpc_lb_v1/messages
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/grpclog
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/internal
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/keepalive
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/metadata
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/naming
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/peer
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/resolver
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/stats
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/status
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/tap
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/transport
9ac9c42358f7c3c69ac9f8610e8790d7c338e85d github.com/go-resty/resty
23def4e6c14b4da8ac2ed8007337bc5eb5007998 github.com/golang/glog
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/jsonpb
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/proto
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/protoc-gen-go/descriptor
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/protoc-gen-go/generator
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/protoc-gen-go/plugin
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/any
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/duration
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/empty
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/struct
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/timestamp
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/examples
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/examples/clients/abe
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/examples/clients/echo
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/examples/examplepb
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/examples/server
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/examples/server/cmd/example-server
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/examples/sub
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/examples/sub2
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/descriptor
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/generator
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/gengateway
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/protoc-gen-grpc-gateway/httprule
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/protoc-gen-swagger/genswagger
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/runtime
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/runtime/internal
8bec008bd140852468324fdd069ed8e38d37a963 github.com/grpc-ecosystem/grpc-gateway/utilities
6724a57986aff9bff1a1770e9347036def7c89f6 github.com/rogpeppe/fastuuid
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/context
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/http2
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/http2/hpack
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/idna
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/internal/timeseries
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/lex/httplex
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/publicsuffix
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/trace
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/secure/bidirule
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/transform
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/unicode/bidi
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/unicode/norm
1e559d0a00eef8a9a43151db4665280bd8dd5886 google.golang.org/genproto/googleapis/api/annotations
1e559d0a00eef8a9a43151db4665280bd8dd5886 google.golang.org/genproto/googleapis/rpc/status
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/balancer
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/codes
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/connectivity
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/credentials
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/grpclb/grpc_lb_v1/messages
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/grpclog
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/internal
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/keepalive
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/metadata
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/naming
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/peer
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/resolver
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/stats
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/status
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/tap
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/transport
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/context
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/context/ctxhttp
7d9177d70076375b9a59c8fde23d52d9c4a7ecd5 golang.org/x/crypto/ssh/terminal
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/bpf
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/context
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/context/ctxhttp
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/dict
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/dns/dnsmessage
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/html
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/html/atom
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/html/charset
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/http2
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/http2/h2i
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/http2/hpack
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/icmp
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/idna
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/internal/iana
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/internal/nettest
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/internal/socket
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/internal/timeseries
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/ipv4
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/ipv6
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/lex/httplex
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/nettest
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/netutil
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/proxy
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/publicsuffix
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/route
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/trace
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/webdav
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/webdav/internal/xml
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/websocket
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/xsrftoken
b6e1ae21643682ce023deb8d152024597b0e9bb4 golang.org/x/sys/unix
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding/charmap
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding/htmlindex
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding/internal
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding/internal/identifier
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding/japanese
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding/korean
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding/simplifiedchinese
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding/traditionalchinese
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/encoding/unicode
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/internal/tag
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/internal/utf8internal
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/language
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/runes
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/secure/bidirule
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/transform
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/unicode/bidi
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/unicode/norm
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/grpclog
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/proto
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/any
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/duration
130e6b02ab059e7b717a096f397c5b60111cae74 github.com/golang/protobuf/ptypes/timestamp
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/context
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/http2
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/http2/hpack
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/idna
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/internal/timeseries
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/lex/httplex
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/trace
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/secure/bidirule
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/transform
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/unicode/bidi
1cbadb444a806fd9430d14ad08967ed91da4fa0a golang.org/x/text/unicode/norm
1e559d0a00eef8a9a43151db4665280bd8dd5886 google.golang.org/genproto/googleapis/rpc/status
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/balancer
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/codes
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/connectivity
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/credentials
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/grpclb/grpc_lb_v1/messages
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/grpclog
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/internal
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/keepalive
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/metadata
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/naming
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/peer
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/resolver
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/stats
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/status
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/tap
8443e311d3925f5e20494496790670942ed48504 google.golang.org/grpc/transport
466e05b2ef3e48ce08a367b6aaac09ee29a124e5 github.com/Xe/ln
2b3a18b5f0fb6b4f9190549597d3f962c02bc5eb github.com/pkg/errors
466e05b2ef3e48ce08a367b6aaac09ee29a124e5 github.com/Xe/ln
466e05b2ef3e48ce08a367b6aaac09ee29a124e5 github.com/Xe/ln/ex
2b3a18b5f0fb6b4f9190549597d3f962c02bc5eb github.com/pkg/errors
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/internal/timeseries
0a9397675ba34b2845f758fe3cd68828369c6517 golang.org/x/net/trace
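The file above is the old flat vendor manifest that this commit retires: one line per vendored package, a revision hash followed by the import path, with a (dirty) marker when the package was vendored from a modified working copy, and many duplicate entries from repeated vendoring runs. For illustration only, one manifest line can be pulled apart like this; the parsing code is a sketch, not something in the repository.

// parseManifestLine splits one line of the old vendor manifest into its parts.
// Sketch for illustration; the tooling that wrote this file is replaced by dep.
package main

import (
	"fmt"
	"strings"
)

type manifestEntry struct {
	Revision   string // commit hash the package was vendored at
	Dirty      bool   // true when the source tree had local modifications
	ImportPath string // Go import path of the vendored package
}

func parseManifestLine(line string) manifestEntry {
	fields := strings.Fields(line)
	e := manifestEntry{Revision: fields[0]}
	rest := fields[1:]
	if len(rest) > 0 && rest[0] == "(dirty)" {
		e.Dirty = true
		rest = rest[1:]
	}
	if len(rest) > 0 {
		e.ImportPath = rest[0]
	}
	return e
}

func main() {
	e := parseManifestLine("62b230097e9c9534ca2074782b25d738c4b68964 (dirty) github.com/Xe/uuid")
	fmt.Printf("%+v\n", e)
}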

Deleted file (522 lines removed):
@@ -1,522 +0,0 @@
package gorethink
import (
"fmt"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/Sirupsen/logrus"
"github.com/cenk/backoff"
"github.com/hailocab/go-hostpool"
)
// A Cluster represents a connection to a RethinkDB cluster, a cluster is created
// by the Session and should rarely be created manually.
//
// The cluster keeps track of all nodes in the cluster and if requested can listen
// for cluster changes and start tracking a new node if one appears. Currently
// nodes are removed from the pool if they become unhealthy (100 failed queries).
// This should hopefully soon be replaced by a backoff system.
type Cluster struct {
opts *ConnectOpts
mu sync.RWMutex
seeds []Host // Initial host nodes specified by user.
hp hostpool.HostPool
nodes map[string]*Node // Active nodes in cluster.
closed bool
nodeIndex int64
}
// NewCluster creates a new cluster by connecting to the given hosts.
func NewCluster(hosts []Host, opts *ConnectOpts) (*Cluster, error) {
c := &Cluster{
hp: hostpool.NewEpsilonGreedy([]string{}, opts.HostDecayDuration, &hostpool.LinearEpsilonValueCalculator{}),
seeds: hosts,
opts: opts,
}
// Attempt to connect to each host and discover any additional hosts if host
// discovery is enabled
if err := c.connectNodes(c.getSeeds()); err != nil {
return nil, err
}
if !c.IsConnected() {
return nil, ErrNoConnectionsStarted
}
if opts.DiscoverHosts {
go c.discover()
}
return c, nil
}
// Query executes a ReQL query using the cluster to connect to the database
func (c *Cluster) Query(q Query) (cursor *Cursor, err error) {
for i := 0; i < c.numRetries(); i++ {
var node *Node
var hpr hostpool.HostPoolResponse
node, hpr, err = c.GetNextNode()
if err != nil {
return nil, err
}
cursor, err = node.Query(q)
hpr.Mark(err)
if !shouldRetryQuery(q, err) {
break
}
}
return cursor, err
}
// Exec executes a ReQL query using the cluster to connect to the database
func (c *Cluster) Exec(q Query) (err error) {
for i := 0; i < c.numRetries(); i++ {
var node *Node
var hpr hostpool.HostPoolResponse
node, hpr, err = c.GetNextNode()
if err != nil {
return err
}
err = node.Exec(q)
hpr.Mark(err)
if !shouldRetryQuery(q, err) {
break
}
}
return err
}
// Server returns the server name and server UUID being used by a connection.
func (c *Cluster) Server() (response ServerResponse, err error) {
for i := 0; i < c.numRetries(); i++ {
var node *Node
var hpr hostpool.HostPoolResponse
node, hpr, err = c.GetNextNode()
if err != nil {
return ServerResponse{}, err
}
response, err = node.Server()
hpr.Mark(err)
// This query should not fail so retry if any error is detected
if err == nil {
break
}
}
return response, err
}
// SetInitialPoolCap sets the initial capacity of the connection pool.
func (c *Cluster) SetInitialPoolCap(n int) {
for _, node := range c.GetNodes() {
node.SetInitialPoolCap(n)
}
}
// SetMaxIdleConns sets the maximum number of connections in the idle
// connection pool.
func (c *Cluster) SetMaxIdleConns(n int) {
for _, node := range c.GetNodes() {
node.SetMaxIdleConns(n)
}
}
// SetMaxOpenConns sets the maximum number of open connections to the database.
func (c *Cluster) SetMaxOpenConns(n int) {
for _, node := range c.GetNodes() {
node.SetMaxOpenConns(n)
}
}
// Close closes the cluster
func (c *Cluster) Close(optArgs ...CloseOpts) error {
if c.closed {
return nil
}
for _, node := range c.GetNodes() {
err := node.Close(optArgs...)
if err != nil {
return err
}
}
c.hp.Close()
c.closed = true
return nil
}
// discover attempts to find new nodes in the cluster using the current nodes
func (c *Cluster) discover() {
// Keep retrying with exponential backoff.
b := backoff.NewExponentialBackOff()
// Never finish retrying (max interval is still 60s)
b.MaxElapsedTime = 0
// Keep trying to discover new nodes
for {
backoff.RetryNotify(func() error {
// If no hosts try seeding nodes
if len(c.GetNodes()) == 0 {
c.connectNodes(c.getSeeds())
}
return c.listenForNodeChanges()
}, b, func(err error, wait time.Duration) {
Log.Debugf("Error discovering hosts %s, waiting: %s", err, wait)
})
}
}
// listenForNodeChanges listens for changes to node status using change feeds.
// This function will block until the query fails
func (c *Cluster) listenForNodeChanges() error {
// Start listening to changes from a random active node
node, hpr, err := c.GetNextNode()
if err != nil {
return err
}
q, err := newQuery(
DB("rethinkdb").Table("server_status").Changes(),
map[string]interface{}{},
c.opts,
)
if err != nil {
return fmt.Errorf("Error building query: %s", err)
}
cursor, err := node.Query(q)
if err != nil {
hpr.Mark(err)
return err
}
// Keep reading node status updates from changefeed
var result struct {
NewVal nodeStatus `gorethink:"new_val"`
OldVal nodeStatus `gorethink:"old_val"`
}
for cursor.Next(&result) {
addr := fmt.Sprintf("%s:%d", result.NewVal.Network.Hostname, result.NewVal.Network.ReqlPort)
addr = strings.ToLower(addr)
switch result.NewVal.Status {
case "connected":
// Connect to node using exponential backoff (give up after waiting 5s)
// to give the node time to start-up.
b := backoff.NewExponentialBackOff()
b.MaxElapsedTime = time.Second * 5
backoff.Retry(func() error {
node, err := c.connectNodeWithStatus(result.NewVal)
if err == nil {
if !c.nodeExists(node) {
c.addNode(node)
Log.WithFields(logrus.Fields{
"id": node.ID,
"host": node.Host.String(),
}).Debug("Connected to node")
}
}
return err
}, b)
}
}
err = cursor.Err()
hpr.Mark(err)
return err
}
func (c *Cluster) connectNodes(hosts []Host) error {
// Add existing nodes to map
nodeSet := map[string]*Node{}
for _, node := range c.GetNodes() {
nodeSet[node.ID] = node
}
var attemptErr error
// Attempt to connect to each seed host
for _, host := range hosts {
conn, err := NewConnection(host.String(), c.opts)
if err != nil {
attemptErr = err
Log.Warnf("Error creating connection: %s", err.Error())
continue
}
defer conn.Close()
if c.opts.DiscoverHosts {
q, err := newQuery(
DB("rethinkdb").Table("server_status"),
map[string]interface{}{},
c.opts,
)
if err != nil {
Log.Warnf("Error building query: %s", err)
continue
}
_, cursor, err := conn.Query(q)
if err != nil {
attemptErr = err
Log.Warnf("Error fetching cluster status: %s", err)
continue
}
var results []nodeStatus
err = cursor.All(&results)
if err != nil {
attemptErr = err
continue
}
for _, result := range results {
node, err := c.connectNodeWithStatus(result)
if err == nil {
if _, ok := nodeSet[node.ID]; !ok {
Log.WithFields(logrus.Fields{
"id": node.ID,
"host": node.Host.String(),
}).Debug("Connected to node")
nodeSet[node.ID] = node
}
} else {
attemptErr = err
Log.Warnf("Error connecting to node: %s", err)
}
}
} else {
svrRsp, err := conn.Server()
if err != nil {
attemptErr = err
Log.Warnf("Error fetching server ID: %s", err)
continue
}
node, err := c.connectNode(svrRsp.ID, []Host{host})
if err == nil {
if _, ok := nodeSet[node.ID]; !ok {
Log.WithFields(logrus.Fields{
"id": node.ID,
"host": node.Host.String(),
}).Debug("Connected to node")
nodeSet[node.ID] = node
}
} else {
attemptErr = err
Log.Warnf("Error connecting to node: %s", err)
}
}
}
// If no nodes were contactable then return the last error, this does not
// include driver errors such as if there was an issue building the
// query
if len(nodeSet) == 0 {
return attemptErr
}
nodes := []*Node{}
for _, node := range nodeSet {
nodes = append(nodes, node)
}
c.setNodes(nodes)
return nil
}
func (c *Cluster) connectNodeWithStatus(s nodeStatus) (*Node, error) {
aliases := make([]Host, len(s.Network.CanonicalAddresses))
for i, aliasAddress := range s.Network.CanonicalAddresses {
aliases[i] = NewHost(aliasAddress.Host, int(s.Network.ReqlPort))
}
return c.connectNode(s.ID, aliases)
}
func (c *Cluster) connectNode(id string, aliases []Host) (*Node, error) {
var pool *Pool
var err error
for len(aliases) > 0 {
pool, err = NewPool(aliases[0], c.opts)
if err != nil {
aliases = aliases[1:]
continue
}
err = pool.Ping()
if err != nil {
aliases = aliases[1:]
continue
}
// Ping successful so break out of loop
break
}
if err != nil {
return nil, err
}
if len(aliases) == 0 {
return nil, ErrInvalidNode
}
return newNode(id, aliases, c, pool), nil
}
// IsConnected returns true if cluster has nodes and is not already closed.
func (c *Cluster) IsConnected() bool {
c.mu.RLock()
closed := c.closed
c.mu.RUnlock()
return (len(c.GetNodes()) > 0) && !closed
}
// AddSeeds adds new seed hosts to the cluster.
func (c *Cluster) AddSeeds(hosts []Host) {
c.mu.Lock()
c.seeds = append(c.seeds, hosts...)
c.mu.Unlock()
}
func (c *Cluster) getSeeds() []Host {
c.mu.RLock()
seeds := c.seeds
c.mu.RUnlock()
return seeds
}
// GetNextNode returns a random node on the cluster
func (c *Cluster) GetNextNode() (*Node, hostpool.HostPoolResponse, error) {
if !c.IsConnected() {
return nil, nil, ErrNoConnections
}
c.mu.RLock()
defer c.mu.RUnlock()
nodes := c.nodes
hpr := c.hp.Get()
if n, ok := nodes[hpr.Host()]; ok {
if !n.Closed() {
return n, hpr, nil
}
}
return nil, nil, ErrNoConnections
}
// GetNodes returns a list of all nodes in the cluster
func (c *Cluster) GetNodes() []*Node {
c.mu.RLock()
nodes := make([]*Node, 0, len(c.nodes))
for _, n := range c.nodes {
nodes = append(nodes, n)
}
c.mu.RUnlock()
return nodes
}
func (c *Cluster) nodeExists(search *Node) bool {
for _, node := range c.GetNodes() {
if node.ID == search.ID {
return true
}
}
return false
}
func (c *Cluster) addNode(node *Node) {
c.mu.RLock()
nodes := append(c.GetNodes(), node)
c.mu.RUnlock()
c.setNodes(nodes)
}
func (c *Cluster) addNodes(nodesToAdd []*Node) {
c.mu.RLock()
nodes := append(c.GetNodes(), nodesToAdd...)
c.mu.RUnlock()
c.setNodes(nodes)
}
func (c *Cluster) setNodes(nodes []*Node) {
nodesMap := make(map[string]*Node, len(nodes))
hosts := make([]string, len(nodes))
for i, node := range nodes {
host := node.Host.String()
nodesMap[host] = node
hosts[i] = host
}
c.mu.Lock()
c.nodes = nodesMap
c.hp.SetHosts(hosts)
c.mu.Unlock()
}
func (c *Cluster) removeNode(nodeID string) {
nodes := c.GetNodes()
nodeArray := make([]*Node, len(nodes)-1)
count := 0
// Add nodes that are not in remove list.
for _, n := range nodes {
if n.ID != nodeID {
nodeArray[count] = n
count++
}
}
// Do sanity check to make sure assumptions are correct.
if count < len(nodeArray) {
// Resize array.
nodeArray2 := make([]*Node, count)
copy(nodeArray2, nodeArray)
nodeArray = nodeArray2
}
c.setNodes(nodeArray)
}
func (c *Cluster) nextNodeIndex() int64 {
return atomic.AddInt64(&c.nodeIndex, 1)
}
func (c *Cluster) numRetries() int {
if n := c.opts.NumRetries; n > 0 {
return n
}
return 3
}
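The file above is gorethink's cluster handling from the old vendor tree. From an application's point of view the Cluster stays hidden behind Connect: you hand it several addresses, optionally let it discover the rest of the cluster, and run queries against the resulting session, as the tests further down do. A minimal hedged sketch of that usage follows; the addresses are placeholders, not values from this repository.

// Minimal sketch of using the vendored gorethink driver's cluster support,
// mirroring the ConnectOpts fields exercised by the tests below. Addresses
// are placeholders, not values from this repository.
package main

import (
	"log"

	r "gopkg.in/gorethink/gorethink.v2"
)

func main() {
	session, err := r.Connect(r.ConnectOpts{
		Addresses:     []string{"db1:28015", "db2:28015"}, // seed hosts; placeholders
		DiscoverHosts: true,                               // track nodes that join the cluster
	})
	if err != nil {
		log.Fatal(err)
	}
	defer session.Close()

	cursor, err := r.Expr("Hello World").Run(session)
	if err != nil {
		log.Fatal(err)
	}
	var response string
	if err := cursor.One(&response); err != nil {
		log.Fatal(err)
	}
	log.Println(response)
}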

Deleted file (99 lines removed):
@@ -1,99 +0,0 @@
// +build cluster
// +build integration
package gorethink
import (
"time"
test "gopkg.in/check.v1"
)
func (s *RethinkSuite) TestClusterDetectNewNode(c *test.C) {
session, err := Connect(ConnectOpts{
Addresses: []string{url, url2},
DiscoverHosts: true,
NodeRefreshInterval: time.Second,
})
c.Assert(err, test.IsNil)
t := time.NewTimer(time.Second * 30)
for {
select {
// Fail if deadline has passed
case <-t.C:
c.Fatal("No node was added to the cluster")
default:
// Pass if another node was added
if len(session.cluster.GetNodes()) >= 3 {
return
}
}
}
}
func (s *RethinkSuite) TestClusterRecoverAfterNoNodes(c *test.C) {
session, err := Connect(ConnectOpts{
Addresses: []string{url, url2},
DiscoverHosts: true,
NodeRefreshInterval: time.Second,
})
c.Assert(err, test.IsNil)
t := time.NewTimer(time.Second * 30)
hasHadZeroNodes := false
for {
select {
// Fail if deadline has passed
case <-t.C:
c.Fatal("No node was added to the cluster")
default:
// Check if there are no nodes
if len(session.cluster.GetNodes()) == 0 {
hasHadZeroNodes = true
}
// Pass if another node was added
if len(session.cluster.GetNodes()) >= 1 && hasHadZeroNodes {
return
}
}
}
}
func (s *RethinkSuite) TestClusterNodeHealth(c *test.C) {
session, err := Connect(ConnectOpts{
Addresses: []string{url1, url2, url3},
DiscoverHosts: true,
NodeRefreshInterval: time.Second,
InitialCap: 50,
MaxOpen: 200,
})
c.Assert(err, test.IsNil)
attempts := 0
failed := 0
seconds := 0
t := time.NewTimer(time.Second * 30)
tick := time.NewTicker(time.Second)
for {
select {
// Fail if deadline has passed
case <-tick.C:
seconds++
c.Logf("%ds elapsed", seconds)
case <-t.C:
// Execute queries for 10s and check that at most 5% of the queries fail
c.Logf("%d of the %d(%d%%) queries failed", failed, attempts, (failed / attempts))
c.Assert(failed <= 100, test.Equals, true)
return
default:
attempts++
if err := Expr(1).Exec(session); err != nil {
c.Logf("Query failed, %s", err)
failed++
}
}
}
}

Deleted file (63 lines removed):
@@ -1,63 +0,0 @@
// +build cluster
package gorethink
import (
"fmt"
"time"
test "gopkg.in/check.v1"
)
func (s *RethinkSuite) TestClusterConnect(c *test.C) {
session, err := Connect(ConnectOpts{
Addresses: []string{url1, url2, url3},
})
c.Assert(err, test.IsNil)
row, err := Expr("Hello World").Run(session)
c.Assert(err, test.IsNil)
var response string
err = row.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, "Hello World")
}
func (s *RethinkSuite) TestClusterMultipleQueries(c *test.C) {
session, err := Connect(ConnectOpts{
Addresses: []string{url1, url2, url3},
})
c.Assert(err, test.IsNil)
for i := 0; i < 1000; i++ {
row, err := Expr(fmt.Sprintf("Hello World", i)).Run(session)
c.Assert(err, test.IsNil)
var response string
err = row.One(&response)
c.Assert(err, test.IsNil)
c.Assert(response, test.Equals, fmt.Sprintf("Hello World", i))
}
}
func (s *RethinkSuite) TestClusterConnectError(c *test.C) {
var err error
_, err = Connect(ConnectOpts{
Addresses: []string{"nonexistanturl"},
Timeout: time.Second,
})
c.Assert(err, test.NotNil)
}
func (s *RethinkSuite) TestClusterConnectDatabase(c *test.C) {
session, err := Connect(ConnectOpts{
Addresses: []string{url1, url2, url3},
Database: "test2",
})
c.Assert(err, test.IsNil)
_, err = Table("test2").Run(session)
c.Assert(err, test.NotNil)
c.Assert(err.Error(), test.Equals, "gorethink: Database `test2` does not exist. in:\nr.Table(\"test2\")")
}

Deleted file (381 lines removed):
@@ -1,381 +0,0 @@
package gorethink
import (
"crypto/tls"
"encoding/binary"
"encoding/json"
"net"
"sync"
"sync/atomic"
"time"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
const (
respHeaderLen = 12
defaultKeepAlivePeriod = time.Second * 30
)
// Response represents the raw response from a query, most of the time you
// should instead use a Cursor when reading from the database.
type Response struct {
Token int64
Type p.Response_ResponseType `json:"t"`
ErrorType p.Response_ErrorType `json:"e"`
Notes []p.Response_ResponseNote `json:"n"`
Responses []json.RawMessage `json:"r"`
Backtrace []interface{} `json:"b"`
Profile interface{} `json:"p"`
}
// Connection is a connection to a rethinkdb database. Connection is not thread
// safe and should only be accessed be a single goroutine
type Connection struct {
net.Conn
address string
opts *ConnectOpts
_ [4]byte
mu sync.Mutex
token int64
cursors map[int64]*Cursor
bad bool
closed bool
}
// NewConnection creates a new connection to the database server
func NewConnection(address string, opts *ConnectOpts) (*Connection, error) {
var err error
c := &Connection{
address: address,
opts: opts,
cursors: make(map[int64]*Cursor),
}
keepAlivePeriod := defaultKeepAlivePeriod
if opts.KeepAlivePeriod > 0 {
keepAlivePeriod = opts.KeepAlivePeriod
}
// Connect to Server
nd := net.Dialer{Timeout: c.opts.Timeout, KeepAlive: keepAlivePeriod}
if c.opts.TLSConfig == nil {
c.Conn, err = nd.Dial("tcp", address)
} else {
c.Conn, err = tls.DialWithDialer(&nd, "tcp", address, c.opts.TLSConfig)
}
if err != nil {
return nil, RQLConnectionError{rqlError(err.Error())}
}
// Send handshake
handshake, err := c.handshake(opts.HandshakeVersion)
if err != nil {
return nil, err
}
if err = handshake.Send(); err != nil {
return nil, err
}
return c, nil
}
// Close closes the underlying net.Conn
func (c *Connection) Close() error {
c.mu.Lock()
defer c.mu.Unlock()
var err error
if !c.closed {
err = c.Conn.Close()
c.closed = true
c.cursors = make(map[int64]*Cursor)
}
return err
}
// Query sends a Query to the database, returning both the raw Response and a
// Cursor which should be used to view the query's response.
//
// This function is used internally by Run which should be used for most queries.
func (c *Connection) Query(q Query) (*Response, *Cursor, error) {
if c == nil {
return nil, nil, ErrConnectionClosed
}
c.mu.Lock()
if c.Conn == nil {
c.bad = true
c.mu.Unlock()
return nil, nil, ErrConnectionClosed
}
// Add token if query is a START/NOREPLY_WAIT
if q.Type == p.Query_START || q.Type == p.Query_NOREPLY_WAIT || q.Type == p.Query_SERVER_INFO {
q.Token = c.nextToken()
}
if q.Type == p.Query_START || q.Type == p.Query_NOREPLY_WAIT {
if c.opts.Database != "" {
var err error
q.Opts["db"], err = DB(c.opts.Database).Build()
if err != nil {
c.mu.Unlock()
return nil, nil, RQLDriverError{rqlError(err.Error())}
}
}
}
c.mu.Unlock()
err := c.sendQuery(q)
if err != nil {
return nil, nil, err
}
if noreply, ok := q.Opts["noreply"]; ok && noreply.(bool) {
return nil, nil, nil
}
for {
response, err := c.readResponse()
if err != nil {
return nil, nil, err
}
if response.Token == q.Token {
// If this was the requested response process and return
return c.processResponse(q, response)
} else if _, ok := c.cursors[response.Token]; ok {
// If the token is in the cursor cache then process the response
c.processResponse(q, response)
} else {
putResponse(response)
}
}
}
type ServerResponse struct {
ID string `gorethink:"id"`
Name string `gorethink:"name"`
}
// Server returns the server name and server UUID being used by a connection.
func (c *Connection) Server() (ServerResponse, error) {
var response ServerResponse
_, cur, err := c.Query(Query{
Type: p.Query_SERVER_INFO,
})
if err != nil {
return response, err
}
if err = cur.One(&response); err != nil {
return response, err
}
if err = cur.Close(); err != nil {
return response, err
}
return response, nil
}
// sendQuery marshals the Query and sends the JSON to the server.
func (c *Connection) sendQuery(q Query) error {
// Build query
b, err := json.Marshal(q.Build())
if err != nil {
return RQLDriverError{rqlError("Error building query")}
}
// Set timeout
if c.opts.WriteTimeout == 0 {
c.Conn.SetWriteDeadline(time.Time{})
} else {
c.Conn.SetWriteDeadline(time.Now().Add(c.opts.WriteTimeout))
}
// Send the JSON encoding of the query itself.
if err = c.writeQuery(q.Token, b); err != nil {
c.bad = true
return RQLConnectionError{rqlError(err.Error())}
}
return nil
}
// getToken generates the next query token, used to number requests and match
// responses with requests.
func (c *Connection) nextToken() int64 {
// requires c.token to be 64-bit aligned on ARM
return atomic.AddInt64(&c.token, 1)
}
// readResponse attempts to read a Response from the server, if no response
// could be read then an error is returned.
func (c *Connection) readResponse() (*Response, error) {
// Set timeout
if c.opts.ReadTimeout == 0 {
c.Conn.SetReadDeadline(time.Time{})
} else {
c.Conn.SetReadDeadline(time.Now().Add(c.opts.ReadTimeout))
}
// Read response header (token+length)
headerBuf := [respHeaderLen]byte{}
if _, err := c.read(headerBuf[:], respHeaderLen); err != nil {
c.bad = true
return nil, RQLConnectionError{rqlError(err.Error())}
}
responseToken := int64(binary.LittleEndian.Uint64(headerBuf[:8]))
messageLength := binary.LittleEndian.Uint32(headerBuf[8:])
// Read the JSON encoding of the Response itself.
b := make([]byte, int(messageLength))
if _, err := c.read(b, int(messageLength)); err != nil {
c.bad = true
return nil, RQLConnectionError{rqlError(err.Error())}
}
// Decode the response
var response = newCachedResponse()
if err := json.Unmarshal(b, response); err != nil {
c.bad = true
return nil, RQLDriverError{rqlError(err.Error())}
}
response.Token = responseToken
return response, nil
}
func (c *Connection) processResponse(q Query, response *Response) (*Response, *Cursor, error) {
switch response.Type {
case p.Response_CLIENT_ERROR:
return c.processErrorResponse(q, response, RQLClientError{rqlServerError{response, q.Term}})
case p.Response_COMPILE_ERROR:
return c.processErrorResponse(q, response, RQLCompileError{rqlServerError{response, q.Term}})
case p.Response_RUNTIME_ERROR:
return c.processErrorResponse(q, response, createRuntimeError(response.ErrorType, response, q.Term))
case p.Response_SUCCESS_ATOM, p.Response_SERVER_INFO:
return c.processAtomResponse(q, response)
case p.Response_SUCCESS_PARTIAL:
return c.processPartialResponse(q, response)
case p.Response_SUCCESS_SEQUENCE:
return c.processSequenceResponse(q, response)
case p.Response_WAIT_COMPLETE:
return c.processWaitResponse(q, response)
default:
putResponse(response)
return nil, nil, RQLDriverError{rqlError("Unexpected response type")}
}
}
func (c *Connection) processErrorResponse(q Query, response *Response, err error) (*Response, *Cursor, error) {
c.mu.Lock()
cursor := c.cursors[response.Token]
delete(c.cursors, response.Token)
c.mu.Unlock()
return response, cursor, err
}
func (c *Connection) processAtomResponse(q Query, response *Response) (*Response, *Cursor, error) {
// Create cursor
cursor := newCursor(c, "Cursor", response.Token, q.Term, q.Opts)
cursor.profile = response.Profile
cursor.extend(response)
return response, cursor, nil
}
func (c *Connection) processPartialResponse(q Query, response *Response) (*Response, *Cursor, error) {
cursorType := "Cursor"
if len(response.Notes) > 0 {
switch response.Notes[0] {
case p.Response_SEQUENCE_FEED:
cursorType = "Feed"
case p.Response_ATOM_FEED:
cursorType = "AtomFeed"
case p.Response_ORDER_BY_LIMIT_FEED:
cursorType = "OrderByLimitFeed"
case p.Response_UNIONED_FEED:
cursorType = "UnionedFeed"
case p.Response_INCLUDES_STATES:
cursorType = "IncludesFeed"
}
}
c.mu.Lock()
cursor, ok := c.cursors[response.Token]
if !ok {
// Create a new cursor if needed
cursor = newCursor(c, cursorType, response.Token, q.Term, q.Opts)
cursor.profile = response.Profile
c.cursors[response.Token] = cursor
}
c.mu.Unlock()
cursor.extend(response)
return response, cursor, nil
}
func (c *Connection) processSequenceResponse(q Query, response *Response) (*Response, *Cursor, error) {
c.mu.Lock()
cursor, ok := c.cursors[response.Token]
if !ok {
// Create a new cursor if needed
cursor = newCursor(c, "Cursor", response.Token, q.Term, q.Opts)
cursor.profile = response.Profile
}
delete(c.cursors, response.Token)
c.mu.Unlock()
cursor.extend(response)
return response, cursor, nil
}
func (c *Connection) processWaitResponse(q Query, response *Response) (*Response, *Cursor, error) {
c.mu.Lock()
delete(c.cursors, response.Token)
c.mu.Unlock()
return response, nil, nil
}
func (c *Connection) isBad() bool {
c.mu.Lock()
defer c.mu.Unlock()
return c.bad
}
var responseCache = make(chan *Response, 16)
func newCachedResponse() *Response {
select {
case r := <-responseCache:
return r
default:
return new(Response)
}
}
func putResponse(r *Response) {
*r = Response{} // zero it
select {
case responseCache <- r:
default:
}
}
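The connection code above frames every response with a fixed 12-byte header: an 8-byte little-endian query token followed by a 4-byte little-endian payload length, which readResponse uses to match responses back to in-flight queries. A self-contained sketch of decoding that header follows; the sample header bytes are fabricated for illustration.

// Decoding the 12-byte response header used by the driver above:
// 8 bytes of little-endian query token, then 4 bytes of payload length.
// The sample header contents are fabricated for illustration.
package main

import (
	"encoding/binary"
	"fmt"
)

const respHeaderLen = 12

func decodeHeader(header [respHeaderLen]byte) (token int64, length uint32) {
	token = int64(binary.LittleEndian.Uint64(header[:8]))
	length = binary.LittleEndian.Uint32(header[8:])
	return token, length
}

func main() {
	var header [respHeaderLen]byte
	binary.LittleEndian.PutUint64(header[:8], 42)  // query token
	binary.LittleEndian.PutUint32(header[8:], 128) // payload length in bytes
	token, length := decodeHeader(header)
	fmt.Printf("token=%d payload=%d bytes\n", token, length)
}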

Deleted file (450 lines removed):
@@ -1,450 +0,0 @@
package gorethink
import (
"bufio"
"crypto/hmac"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/binary"
"encoding/json"
"fmt"
"hash"
"io"
"strconv"
"strings"
"golang.org/x/crypto/pbkdf2"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
type HandshakeVersion int
const (
HandshakeV1_0 HandshakeVersion = iota
HandshakeV0_4
)
type connectionHandshake interface {
Send() error
}
func (c *Connection) handshake(version HandshakeVersion) (connectionHandshake, error) {
switch version {
case HandshakeV0_4:
return &connectionHandshakeV0_4{conn: c}, nil
case HandshakeV1_0:
return &connectionHandshakeV1_0{conn: c}, nil
default:
return nil, fmt.Errorf("Unrecognised handshake version")
}
}
type connectionHandshakeV0_4 struct {
conn *Connection
}
func (c *connectionHandshakeV0_4) Send() error {
// Send handshake request
if err := c.writeHandshakeReq(); err != nil {
c.conn.Close()
return RQLConnectionError{rqlError(err.Error())}
}
// Read handshake response
if err := c.readHandshakeSuccess(); err != nil {
c.conn.Close()
return RQLConnectionError{rqlError(err.Error())}
}
return nil
}
func (c *connectionHandshakeV0_4) writeHandshakeReq() error {
pos := 0
dataLen := 4 + 4 + len(c.conn.opts.AuthKey) + 4
data := make([]byte, dataLen)
// Send the protocol version to the server as a 4-byte little-endian-encoded integer
binary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_V0_4))
pos += 4
// Send the length of the auth key to the server as a 4-byte little-endian-encoded integer
binary.LittleEndian.PutUint32(data[pos:], uint32(len(c.conn.opts.AuthKey)))
pos += 4
// Send the auth key as an ASCII string
if len(c.conn.opts.AuthKey) > 0 {
pos += copy(data[pos:], c.conn.opts.AuthKey)
}
// Send the protocol type as a 4-byte little-endian-encoded integer
binary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_JSON))
pos += 4
return c.conn.writeData(data)
}
func (c *connectionHandshakeV0_4) readHandshakeSuccess() error {
reader := bufio.NewReader(c.conn.Conn)
line, err := reader.ReadBytes('\x00')
if err != nil {
if err == io.EOF {
return fmt.Errorf("Unexpected EOF: %s", string(line))
}
return err
}
// convert to string and remove trailing NUL byte
response := string(line[:len(line)-1])
if response != "SUCCESS" {
response = strings.TrimSpace(response)
// we failed authorization or something else terrible happened
return RQLDriverError{rqlError(fmt.Sprintf("Server dropped connection with message: \"%s\"", response))}
}
return nil
}
const (
handshakeV1_0_protocolVersionNumber = 0
handshakeV1_0_authenticationMethod = "SCRAM-SHA-256"
)
type connectionHandshakeV1_0 struct {
conn *Connection
reader *bufio.Reader
authMsg string
}
func (c *connectionHandshakeV1_0) Send() error {
c.reader = bufio.NewReader(c.conn.Conn)
// Generate client nonce
clientNonce, err := c.generateNonce()
if err != nil {
c.conn.Close()
return RQLDriverError{rqlError(fmt.Sprintf("Failed to generate client nonce: %s", err))}
}
// Send client first message
if err := c.writeFirstMessage(clientNonce); err != nil {
c.conn.Close()
return err
}
// Read status
if err := c.checkServerVersions(); err != nil {
c.conn.Close()
return err
}
// Read server first message
i, salt, serverNonce, err := c.readFirstMessage()
if err != nil {
c.conn.Close()
return err
}
// Check server nonce
if !strings.HasPrefix(serverNonce, clientNonce) {
return RQLAuthError{RQLDriverError{rqlError("Invalid nonce from server")}}
}
// Generate proof
saltedPass := c.saltPassword(i, salt)
clientProof := c.calculateProof(saltedPass, clientNonce, serverNonce)
serverSignature := c.serverSignature(saltedPass)
// Send client final message
if err := c.writeFinalMessage(serverNonce, clientProof); err != nil {
c.conn.Close()
return err
}
// Read server final message
if err := c.readFinalMessage(serverSignature); err != nil {
c.conn.Close()
return err
}
return nil
}
func (c *connectionHandshakeV1_0) writeFirstMessage(clientNonce string) error {
// Default username to admin if not set
username := "admin"
if c.conn.opts.Username != "" {
username = c.conn.opts.Username
}
c.authMsg = fmt.Sprintf("n=%s,r=%s", username, clientNonce)
msg := fmt.Sprintf(
`{"protocol_version": %d,"authentication": "n,,%s","authentication_method": "%s"}`,
handshakeV1_0_protocolVersionNumber, c.authMsg, handshakeV1_0_authenticationMethod,
)
pos := 0
dataLen := 4 + len(msg) + 1
data := make([]byte, dataLen)
// Send the protocol version to the server as a 4-byte little-endian-encoded integer
binary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_V1_0))
pos += 4
// Send the auth message as an ASCII string
pos += copy(data[pos:], msg)
// Add null terminating byte
data[pos] = '\x00'
return c.writeData(data)
}
func (c *connectionHandshakeV1_0) checkServerVersions() error {
b, err := c.readResponse()
if err != nil {
return err
}
// Read status
type versionsResponse struct {
Success bool `json:"success"`
MinProtocolVersion int `json:"min_protocol_version"`
MaxProtocolVersion int `json:"max_protocol_version"`
ServerVersion string `json:"server_version"`
ErrorCode int `json:"error_code"`
Error string `json:"error"`
}
var rsp *versionsResponse
statusStr := string(b)
if err := json.Unmarshal(b, &rsp); err != nil {
if strings.HasPrefix(statusStr, "ERROR: ") {
statusStr = strings.TrimPrefix(statusStr, "ERROR: ")
return RQLConnectionError{rqlError(statusStr)}
}
return RQLDriverError{rqlError(fmt.Sprintf("Error reading versions: %s", err))}
}
if !rsp.Success {
return c.handshakeError(rsp.ErrorCode, rsp.Error)
}
if rsp.MinProtocolVersion > handshakeV1_0_protocolVersionNumber ||
rsp.MaxProtocolVersion < handshakeV1_0_protocolVersionNumber {
return RQLDriverError{rqlError(
fmt.Sprintf(
"Unsupported protocol version %d, expected between %d and %d.",
handshakeV1_0_protocolVersionNumber,
rsp.MinProtocolVersion,
rsp.MaxProtocolVersion,
),
)}
}
return nil
}
func (c *connectionHandshakeV1_0) readFirstMessage() (i int64, salt []byte, serverNonce string, err error) {
b, err2 := c.readResponse()
if err2 != nil {
err = err2
return
}
// Read server message
type firstMessageResponse struct {
Success bool `json:"success"`
Authentication string `json:"authentication"`
ErrorCode int `json:"error_code"`
Error string `json:"error"`
}
var rsp *firstMessageResponse
if err2 := json.Unmarshal(b, &rsp); err2 != nil {
err = RQLDriverError{rqlError(fmt.Sprintf("Error parsing auth response: %s", err2))}
return
}
if !rsp.Success {
err = c.handshakeError(rsp.ErrorCode, rsp.Error)
return
}
c.authMsg += ","
c.authMsg += rsp.Authentication
// Parse authentication field
auth := map[string]string{}
parts := strings.Split(rsp.Authentication, ",")
for _, part := range parts {
i := strings.Index(part, "=")
if i != -1 {
auth[part[:i]] = part[i+1:]
}
}
// Extract return values
if v, ok := auth["i"]; ok {
i, err = strconv.ParseInt(v, 10, 64)
if err != nil {
return
}
}
if v, ok := auth["s"]; ok {
salt, err = base64.StdEncoding.DecodeString(v)
if err != nil {
return
}
}
if v, ok := auth["r"]; ok {
serverNonce = v
}
return
}
func (c *connectionHandshakeV1_0) writeFinalMessage(serverNonce, clientProof string) error {
authMsg := "c=biws,r="
authMsg += serverNonce
authMsg += ",p="
authMsg += clientProof
msg := fmt.Sprintf(`{"authentication": "%s"}`, authMsg)
pos := 0
dataLen := len(msg) + 1
data := make([]byte, dataLen)
// Send the auth message as an ASCII string
pos += copy(data[pos:], msg)
// Add null terminating byte
data[pos] = '\x00'
return c.writeData(data)
}
func (c *connectionHandshakeV1_0) readFinalMessage(serverSignature string) error {
b, err := c.readResponse()
if err != nil {
return err
}
// Read server message
type finalMessageResponse struct {
Success bool `json:"success"`
Authentication string `json:"authentication"`
ErrorCode int `json:"error_code"`
Error string `json:"error"`
}
var rsp *finalMessageResponse
if err := json.Unmarshal(b, &rsp); err != nil {
return RQLDriverError{rqlError(fmt.Sprintf("Error parsing auth response: %s", err))}
}
if !rsp.Success {
return c.handshakeError(rsp.ErrorCode, rsp.Error)
}
// Parse authentication field
auth := map[string]string{}
parts := strings.Split(rsp.Authentication, ",")
for _, part := range parts {
i := strings.Index(part, "=")
if i != -1 {
auth[part[:i]] = part[i+1:]
}
}
// Validate server response
if serverSignature != auth["v"] {
return RQLAuthError{RQLDriverError{rqlError("Invalid server signature")}}
}
return nil
}
func (c *connectionHandshakeV1_0) writeData(data []byte) error {
if err := c.conn.writeData(data); err != nil {
return RQLConnectionError{rqlError(err.Error())}
}
return nil
}
func (c *connectionHandshakeV1_0) readResponse() ([]byte, error) {
line, err := c.reader.ReadBytes('\x00')
if err != nil {
if err == io.EOF {
return nil, RQLConnectionError{rqlError(fmt.Sprintf("Unexpected EOF: %s", string(line)))}
}
return nil, RQLConnectionError{rqlError(err.Error())}
}
// Strip null byte and return
return line[:len(line)-1], nil
}
func (c *connectionHandshakeV1_0) generateNonce() (string, error) {
const nonceSize = 24
b := make([]byte, nonceSize)
_, err := rand.Read(b)
if err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(b), nil
}
func (c *connectionHandshakeV1_0) saltPassword(iter int64, salt []byte) []byte {
pass := []byte(c.conn.opts.Password)
return pbkdf2.Key(pass, salt, int(iter), sha256.Size, sha256.New)
}
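// calculateProof computes the SCRAM client proof (RFC 5802):
// ClientKey = HMAC(SaltedPassword, "Client Key"), StoredKey = H(ClientKey),
// ClientSignature = HMAC(StoredKey, AuthMessage) and
// ClientProof = ClientKey XOR ClientSignature, returned base64-encoded.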
func (c *connectionHandshakeV1_0) calculateProof(saltedPass []byte, clientNonce, serverNonce string) string {
// Generate proof
c.authMsg += ",c=biws,r=" + serverNonce
mac := hmac.New(c.hashFunc(), saltedPass)
mac.Write([]byte("Client Key"))
clientKey := mac.Sum(nil)
hash := c.hashFunc()()
hash.Write(clientKey)
storedKey := hash.Sum(nil)
mac = hmac.New(c.hashFunc(), storedKey)
mac.Write([]byte(c.authMsg))
clientSignature := mac.Sum(nil)
clientProof := make([]byte, len(clientKey))
for i := range clientKey {
clientProof[i] = clientKey[i] ^ clientSignature[i]
}
return base64.StdEncoding.EncodeToString(clientProof)
}
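// serverSignature computes the expected server signature (RFC 5802):
// ServerKey = HMAC(SaltedPassword, "Server Key") and
// ServerSignature = HMAC(ServerKey, AuthMessage), returned base64-encoded.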
func (c *connectionHandshakeV1_0) serverSignature(saltedPass []byte) string {
mac := hmac.New(c.hashFunc(), saltedPass)
mac.Write([]byte("Server Key"))
serverKey := mac.Sum(nil)
mac = hmac.New(c.hashFunc(), serverKey)
mac.Write([]byte(c.authMsg))
serverSignature := mac.Sum(nil)
return base64.StdEncoding.EncodeToString(serverSignature)
}
func (c *connectionHandshakeV1_0) handshakeError(code int, message string) error {
if code >= 10 && code <= 20 {
return RQLAuthError{RQLDriverError{rqlError(message)}}
}
return RQLDriverError{rqlError(message)}
}
func (c *connectionHandshakeV1_0) hashFunc() func() hash.Hash {
return sha256.New
}


@ -1,41 +0,0 @@
package gorethink
import "encoding/binary"
// Write 'data' to conn
func (c *Connection) writeData(data []byte) error {
_, err := c.Conn.Write(data[:])
return err
}
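// read fills buf with exactly length bytes from the underlying connection,
// looping over short reads; it returns the number of bytes read and the first
// error encountered, if any.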
func (c *Connection) read(buf []byte, length int) (total int, err error) {
var n int
for total < length {
if n, err = c.Conn.Read(buf[total:length]); err != nil {
break
}
total += n
}
return total, err
}
func (c *Connection) writeQuery(token int64, q []byte) error {
pos := 0
dataLen := 8 + 4 + len(q)
data := make([]byte, dataLen)
// Write the query token as an 8-byte little-endian-encoded integer
binary.LittleEndian.PutUint64(data[pos:], uint64(token))
pos += 8
// Write the length of the encoded query as a 4-byte little-endian-encoded integer
binary.LittleEndian.PutUint32(data[pos:], uint32(len(q)))
pos += 4
// Write the encoded query itself
pos += copy(data[pos:], q)
return c.writeData(data)
}


@ -1,710 +0,0 @@
package gorethink
import (
"bytes"
"encoding/json"
"errors"
"reflect"
"sync"
"gopkg.in/gorethink/gorethink.v2/encoding"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
var (
errNilCursor = errors.New("cursor is nil")
errCursorClosed = errors.New("connection closed, cannot read cursor")
)
func newCursor(conn *Connection, cursorType string, token int64, term *Term, opts map[string]interface{}) *Cursor {
if cursorType == "" {
cursorType = "Cursor"
}
connOpts := &ConnectOpts{}
if conn != nil {
connOpts = conn.opts
}
cursor := &Cursor{
conn: conn,
connOpts: connOpts,
token: token,
cursorType: cursorType,
term: term,
opts: opts,
buffer: make([]interface{}, 0),
responses: make([]json.RawMessage, 0),
}
return cursor
}
// Cursor is the result of a query. Its cursor starts before the first row
// of the result set. A Cursor is not thread safe and should only be accessed
// by a single goroutine at any given time. Use Next to advance through the
// rows:
//
// cursor, err := query.Run(session)
// ...
// defer cursor.Close()
//
// var response interface{}
// for cursor.Next(&response) {
// ...
// }
// err = cursor.Err() // get any error encountered during iteration
// ...
type Cursor struct {
releaseConn func() error
conn *Connection
connOpts *ConnectOpts
token int64
cursorType string
term *Term
opts map[string]interface{}
mu sync.RWMutex
lastErr error
fetching bool
closed bool
finished bool
isAtom bool
isSingleValue bool
pendingSkips int
buffer []interface{}
responses []json.RawMessage
profile interface{}
}
// Profile returns the information returned from the query profiler.
func (c *Cursor) Profile() interface{} {
if c == nil {
return nil
}
c.mu.RLock()
defer c.mu.RUnlock()
return c.profile
}
// Type returns the cursor type (by default "Cursor")
func (c *Cursor) Type() string {
if c == nil {
return "Cursor"
}
c.mu.RLock()
defer c.mu.RUnlock()
return c.cursorType
}
// Err returns nil if no errors happened during iteration, or the actual
// error otherwise.
func (c *Cursor) Err() error {
if c == nil {
return errNilCursor
}
c.mu.RLock()
defer c.mu.RUnlock()
return c.lastErr
}
// Close closes the cursor, preventing further enumeration. If the end is
// encountered, the cursor is closed automatically. Close is idempotent.
func (c *Cursor) Close() error {
if c == nil {
return errNilCursor
}
c.mu.Lock()
defer c.mu.Unlock()
var err error
// If cursor is already closed return immediately
closed := c.closed
if closed {
return nil
}
// Get the connection and check that it is valid; no need to lock as the
// connection is only set when the cursor is created
conn := c.conn
if conn == nil {
return nil
}
if conn.Conn == nil {
return nil
}
// Stop any unfinished queries
if !c.finished {
q := Query{
Type: p.Query_STOP,
Token: c.token,
Opts: map[string]interface{}{
"noreply": true,
},
}
_, _, err = conn.Query(q)
}
if c.releaseConn != nil {
if err := c.releaseConn(); err != nil {
return err
}
}
c.closed = true
c.conn = nil
c.buffer = nil
c.responses = nil
return err
}
// Next retrieves the next document from the result set, blocking if necessary.
// This method will also automatically retrieve another batch of documents from
// the server when the current one is exhausted, or before that in the background
// if possible.
//
// Next returns true if a document was successfully unmarshalled onto result,
// and false at the end of the result set or if an error happened.
// When Next returns false, the Err method should be called to verify if
// there was an error during iteration.
//
// Also note that you are able to reuse the same variable multiple times as
// `Next` zeroes the value before scanning in the result.
func (c *Cursor) Next(dest interface{}) bool {
if c == nil {
return false
}
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return false
}
hasMore, err := c.nextLocked(dest, true)
if c.handleErrorLocked(err) != nil {
c.mu.Unlock()
c.Close()
return false
}
c.mu.Unlock()
if !hasMore {
c.Close()
}
return hasMore
}
func (c *Cursor) nextLocked(dest interface{}, progressCursor bool) (bool, error) {
for {
if err := c.seekCursor(true); err != nil {
return false, err
}
if c.closed {
return false, nil
}
if len(c.buffer) == 0 && c.finished {
return false, nil
}
if len(c.buffer) > 0 {
data := c.buffer[0]
if progressCursor {
c.buffer = c.buffer[1:]
}
err := encoding.Decode(dest, data)
if err != nil {
return false, err
}
return true, nil
}
}
}
// Peek behaves similarly to Next, retrieving the next document from the result set
// and blocking if necessary. Unlike Next, however, Peek does not advance the
// position of the cursor.
//
// Like Next, it will also automatically retrieve another batch of documents from
// the server when the current one is exhausted, or before that in the background
// if possible.
//
// Peek will return decoding errors, but they are not persisted in the cursor
// and therefore will not be available on cursor.Err(). This can be useful for
// expressions that can return different types: the same document can be decoded
// into different interfaces before the cursor is advanced with Skip or Next.
//
// Peek returns true if a document was successfully unmarshalled onto result,
// and false at the end of the result set or if an error happened. Peek also
// returns the error (if any) that occurred.
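//
// A minimal sketch (cursor and handle are illustrative): peek at the next
// document as a map and only advance the cursor once it has been handled:
//
// var doc map[string]interface{}
// if ok, err := cursor.Peek(&doc); ok && err == nil {
//     handle(doc)
//     cursor.Skip()
// }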
func (c *Cursor) Peek(dest interface{}) (bool, error) {
if c == nil {
return false, errNilCursor
}
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return false, nil
}
hasMore, err := c.nextLocked(dest, false)
if _, isDecodeErr := err.(*encoding.DecodeTypeError); isDecodeErr {
c.mu.Unlock()
return false, err
}
if c.handleErrorLocked(err) != nil {
c.mu.Unlock()
c.Close()
return false, err
}
c.mu.Unlock()
return hasMore, nil
}
// Skip progresses the cursor by one record. It is useful after a successful
// Peek to avoid duplicate decoding work.
func (c *Cursor) Skip() {
if c == nil {
return
}
c.mu.Lock()
defer c.mu.Unlock()
c.pendingSkips++
}
// NextResponse retrieves the next raw response from the result set, blocking if necessary.
// Unlike Next the returned response is the raw JSON document returned from the
// database.
//
// NextResponse returns false (and a nil byte slice) at the end of the result
// set or if an error happened.
func (c *Cursor) NextResponse() ([]byte, bool) {
if c == nil {
return nil, false
}
c.mu.Lock()
if c.closed {
c.mu.Unlock()
return nil, false
}
b, hasMore, err := c.nextResponseLocked()
if c.handleErrorLocked(err) != nil {
c.mu.Unlock()
c.Close()
return nil, false
}
c.mu.Unlock()
if !hasMore {
c.Close()
}
return b, hasMore
}
func (c *Cursor) nextResponseLocked() ([]byte, bool, error) {
for {
if err := c.seekCursor(false); err != nil {
return nil, false, err
}
if len(c.responses) == 0 && c.finished {
return nil, false, nil
}
if len(c.responses) > 0 {
var response json.RawMessage
response, c.responses = c.responses[0], c.responses[1:]
return []byte(response), true, nil
}
}
}
// All retrieves all documents from the result set into the provided slice
// and closes the cursor.
//
// The result argument must be the address of a slice. The slice
// may be nil or previously allocated.
//
// Also note that you are able to reuse the same variable multiple times as
// `All` zeroes the value before scanning in the result. It also attempts
// to reuse the existing slice without allocating any more space by either
// resizing or returning a selection of the slice if necessary.
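//
// A minimal sketch (Post is an illustrative struct type and session an open
// session):
//
// cursor, err := r.Table("posts").Run(session)
// ...
// var posts []Post
// err = cursor.All(&posts)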
func (c *Cursor) All(result interface{}) error {
if c == nil {
return errNilCursor
}
resultv := reflect.ValueOf(result)
if resultv.Kind() != reflect.Ptr || resultv.Elem().Kind() != reflect.Slice {
panic("result argument must be a slice address")
}
slicev := resultv.Elem()
slicev = slicev.Slice(0, slicev.Cap())
elemt := slicev.Type().Elem()
i := 0
for {
if slicev.Len() == i {
elemp := reflect.New(elemt)
if !c.Next(elemp.Interface()) {
break
}
slicev = reflect.Append(slicev, elemp.Elem())
slicev = slicev.Slice(0, slicev.Cap())
} else {
if !c.Next(slicev.Index(i).Addr().Interface()) {
break
}
}
i++
}
resultv.Elem().Set(slicev.Slice(0, i))
if err := c.Err(); err != nil {
c.Close()
return err
}
if err := c.Close(); err != nil {
return err
}
return nil
}
// One retrieves a single document from the result set into the provided
// value and closes the cursor.
//
// Also note that you are able to reuse the same variable multiple times as
// `One` zeroes the value before scanning in the result.
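//
// A minimal sketch (Post is an illustrative struct type):
//
// var post Post
// err := cursor.One(&post)
// if err == r.ErrEmptyResult {
//     // no document was found
// }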
func (c *Cursor) One(result interface{}) error {
if c == nil {
return errNilCursor
}
if c.IsNil() {
c.Close()
return ErrEmptyResult
}
hasResult := c.Next(result)
if err := c.Err(); err != nil {
c.Close()
return err
}
if err := c.Close(); err != nil {
return err
}
if !hasResult {
return ErrEmptyResult
}
return nil
}
// Interface retrieves all documents from the result set and returns the data
// as an interface{} and closes the cursor.
//
// If the query returns multiple documents then a slice will be returned,
// otherwise a single value will be returned.
func (c *Cursor) Interface() (interface{}, error) {
if c == nil {
return nil, errNilCursor
}
var results []interface{}
var result interface{}
for c.Next(&result) {
results = append(results, result)
}
if err := c.Err(); err != nil {
return nil, err
}
c.mu.RLock()
isSingleValue := c.isSingleValue
c.mu.RUnlock()
if isSingleValue {
if len(results) == 0 {
return nil, nil
}
return results[0], nil
}
return results, nil
}
// Listen listens for rows from the database and sends the result onto the given
// channel. The type that the row is scanned into is determined by the element
// type of the channel.
//
// Also note that this function returns immediately.
//
// cursor, err := r.Expr([]int{1,2,3}).Run(session)
// if err != nil {
// panic(err)
// }
//
// ch := make(chan int)
// cursor.Listen(ch)
// <- ch // 1
// <- ch // 2
// <- ch // 3
func (c *Cursor) Listen(channel interface{}) {
go func() {
channelv := reflect.ValueOf(channel)
if channelv.Kind() != reflect.Chan {
panic("input argument must be a channel")
}
elemt := channelv.Type().Elem()
for {
elemp := reflect.New(elemt)
if !c.Next(elemp.Interface()) {
break
}
channelv.Send(elemp.Elem())
}
c.Close()
channelv.Close()
}()
}
// IsNil tests if the current row is nil.
func (c *Cursor) IsNil() bool {
if c == nil {
return true
}
c.mu.RLock()
defer c.mu.RUnlock()
if len(c.buffer) > 0 {
return c.buffer[0] == nil
}
if len(c.responses) > 0 {
response := c.responses[0]
if response == nil {
return true
}
if string(response) == "null" {
return true
}
return false
}
return true
}
// fetchMore fetches more rows from the database by sending a continue query
// for the cursor's token.
func (c *Cursor) fetchMore() error {
var err error
if !c.fetching {
c.fetching = true
if c.closed {
return errCursorClosed
}
q := Query{
Type: p.Query_CONTINUE,
Token: c.token,
}
c.mu.Unlock()
_, _, err = c.conn.Query(q)
c.mu.Lock()
}
return err
}
// handleError sets the value of lastErr to err if lastErr is not yet set.
func (c *Cursor) handleError(err error) error {
c.mu.Lock()
defer c.mu.Unlock()
return c.handleErrorLocked(err)
}
func (c *Cursor) handleErrorLocked(err error) error {
if c.lastErr == nil {
c.lastErr = err
}
return c.lastErr
}
// extend adds the result of a continue query to the cursor.
func (c *Cursor) extend(response *Response) {
c.mu.Lock()
defer c.mu.Unlock()
c.extendLocked(response)
}
func (c *Cursor) extendLocked(response *Response) {
c.responses = append(c.responses, response.Responses...)
c.finished = response.Type != p.Response_SUCCESS_PARTIAL
c.fetching = false
c.isAtom = response.Type == p.Response_SUCCESS_ATOM
putResponse(response)
}
// seekCursor takes care of loading more data if needed and applying pending skips
//
// bufferResponse determines whether the response will be parsed into the buffer
func (c *Cursor) seekCursor(bufferResponse bool) error {
if c.lastErr != nil {
return c.lastErr
}
if len(c.buffer) == 0 && len(c.responses) == 0 && c.closed {
return errCursorClosed
}
// Loop over loading data, applying skips as necessary and loading more data as needed
// until either the cursor is closed or finished, or we have applied all outstanding
// skips and data is available
for {
c.applyPendingSkips(bufferResponse) // if we are buffering the responses, skip can drain from the buffer
if bufferResponse && len(c.buffer) == 0 && len(c.responses) > 0 {
if err := c.bufferNextResponse(); err != nil {
return err
}
continue // go around the loop again to re-apply pending skips
} else if len(c.buffer) == 0 && len(c.responses) == 0 && !c.finished {
// We skipped all of our data, load some more
if err := c.fetchMore(); err != nil {
return err
}
if c.closed {
return nil
}
continue // go around the loop again to re-apply pending skips
}
return nil
}
}
// applyPendingSkips applies all pending skips to the buffer and
// returns whether there are more pending skips to be applied
//
// if drainFromBuffer is true, we will drain from the buffer, otherwise
// we drain from the responses
func (c *Cursor) applyPendingSkips(drainFromBuffer bool) (stillPending bool) {
if c.pendingSkips == 0 {
return false
}
if drainFromBuffer {
if len(c.buffer) > c.pendingSkips {
c.buffer = c.buffer[c.pendingSkips:]
c.pendingSkips = 0
return false
}
c.pendingSkips -= len(c.buffer)
c.buffer = c.buffer[:0]
return c.pendingSkips > 0
}
if len(c.responses) > c.pendingSkips {
c.responses = c.responses[c.pendingSkips:]
c.pendingSkips = 0
return false
}
c.pendingSkips -= len(c.responses)
c.responses = c.responses[:0]
return c.pendingSkips > 0
}
// bufferNextResponse decodes a single response and stores the result into the
// buffer. If the response came from an atom response containing multiple
// records, all of them are stored in the buffer.
func (c *Cursor) bufferNextResponse() error {
if c.closed {
return errCursorClosed
}
// If there are no responses, nothing to do
if len(c.responses) == 0 {
return nil
}
response := c.responses[0]
c.responses = c.responses[1:]
var value interface{}
decoder := json.NewDecoder(bytes.NewBuffer(response))
if c.connOpts.UseJSONNumber {
decoder.UseNumber()
}
err := decoder.Decode(&value)
if err != nil {
return err
}
value, err = recursivelyConvertPseudotype(value, c.opts)
if err != nil {
return err
}
// If response is an ATOM then try and convert to an array
if data, ok := value.([]interface{}); ok && c.isAtom {
c.buffer = append(c.buffer, data...)
} else if value == nil {
c.buffer = append(c.buffer, nil)
} else {
c.buffer = append(c.buffer, value)
// If this is the only value in the response and the response was an
// atom then set the single value flag
if c.isAtom {
c.isSingleValue = true
}
}
return nil
}


@ -1,6 +0,0 @@
// Package gorethink implements a Go driver for RethinkDB
//
// Current version: v3.0.0 (RethinkDB v2.3)
// For more in depth information on how to use RethinkDB check out the API docs
// at http://rethinkdb.com/api
package gorethink


@ -1,182 +0,0 @@
package gorethink
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"strings"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
var (
// ErrNoHosts is returned when no hosts are provided to the Connect method.
ErrNoHosts = errors.New("no hosts provided")
// ErrNoConnectionsStarted is returned when the driver couldn't connect to any
// of the provided hosts.
ErrNoConnectionsStarted = errors.New("no connections were made when creating the session")
// ErrInvalidNode is returned when attempting to connect to a node which
// returns an invalid response.
ErrInvalidNode = errors.New("invalid node")
// ErrNoConnections is returned when there are no active connections in the
// clusters connection pool.
ErrNoConnections = errors.New("gorethink: no connections were available")
// ErrConnectionClosed is returned when trying to send a query with a closed
// connection.
ErrConnectionClosed = errors.New("gorethink: the connection is closed")
)
func printCarrots(t Term, frames []*p.Frame) string {
var frame *p.Frame
if len(frames) > 1 {
frame, frames = frames[0], frames[1:]
} else if len(frames) == 1 {
frame, frames = frames[0], []*p.Frame{}
}
for i, arg := range t.args {
if frame.GetPos() == int64(i) {
t.args[i] = Term{
termType: p.Term_DATUM,
data: printCarrots(arg, frames),
}
}
}
for k, arg := range t.optArgs {
if frame.GetOpt() == k {
t.optArgs[k] = Term{
termType: p.Term_DATUM,
data: printCarrots(arg, frames),
}
}
}
b := &bytes.Buffer{}
for _, c := range t.String() {
if c != '^' {
b.WriteString(" ")
} else {
b.WriteString("^")
}
}
return b.String()
}
// Error constants
var ErrEmptyResult = errors.New("The result does not contain any more rows")
// Connection/Response errors
// rqlServerError is the base type for all server errors; its message is built
// from the response and, if set, the query term.
type rqlServerError struct {
response *Response
term *Term
}
func (e rqlServerError) Error() string {
var err = "An error occurred"
if e.response != nil {
json.Unmarshal(e.response.Responses[0], &err)
}
if e.term == nil {
return fmt.Sprintf("gorethink: %s", err)
}
return fmt.Sprintf("gorethink: %s in:\n%s", err, e.term.String())
}
func (e rqlServerError) String() string {
return e.Error()
}
type rqlError string
func (e rqlError) Error() string {
return fmt.Sprintf("gorethink: %s", string(e))
}
func (e rqlError) String() string {
return e.Error()
}
// Exported Error "Implementations"
type RQLClientError struct{ rqlServerError }
type RQLCompileError struct{ rqlServerError }
type RQLDriverCompileError struct{ RQLCompileError }
type RQLServerCompileError struct{ RQLCompileError }
type RQLAuthError struct{ RQLDriverError }
type RQLRuntimeError struct{ rqlServerError }
type RQLQueryLogicError struct{ RQLRuntimeError }
type RQLNonExistenceError struct{ RQLQueryLogicError }
type RQLResourceLimitError struct{ RQLRuntimeError }
type RQLUserError struct{ RQLRuntimeError }
type RQLInternalError struct{ RQLRuntimeError }
type RQLTimeoutError struct{ rqlServerError }
type RQLAvailabilityError struct{ RQLRuntimeError }
type RQLOpFailedError struct{ RQLAvailabilityError }
type RQLOpIndeterminateError struct{ RQLAvailabilityError }
// RQLDriverError represents an unexpected error with the driver, if this error
// persists please create an issue.
type RQLDriverError struct {
rqlError
}
// RQLConnectionError represents an error when communicating with the database
// server.
type RQLConnectionError struct {
rqlError
}
func createRuntimeError(errorType p.Response_ErrorType, response *Response, term *Term) error {
serverErr := rqlServerError{response, term}
switch errorType {
case p.Response_QUERY_LOGIC:
return RQLQueryLogicError{RQLRuntimeError{serverErr}}
case p.Response_NON_EXISTENCE:
return RQLNonExistenceError{RQLQueryLogicError{RQLRuntimeError{serverErr}}}
case p.Response_RESOURCE_LIMIT:
return RQLResourceLimitError{RQLRuntimeError{serverErr}}
case p.Response_USER:
return RQLUserError{RQLRuntimeError{serverErr}}
case p.Response_INTERNAL:
return RQLInternalError{RQLRuntimeError{serverErr}}
case p.Response_OP_FAILED:
return RQLOpFailedError{RQLAvailabilityError{RQLRuntimeError{serverErr}}}
case p.Response_OP_INDETERMINATE:
return RQLOpIndeterminateError{RQLAvailabilityError{RQLRuntimeError{serverErr}}}
default:
return RQLRuntimeError{serverErr}
}
}
// Error type helpers
// IsConflictErr returns true if the error is non-nil and the query failed
// due to a duplicate primary key.
func IsConflictErr(err error) bool {
if err == nil {
return false
}
return strings.HasPrefix(err.Error(), "Duplicate primary key")
}
// IsTypeErr returns true if the error is non-nil and the query failed due
// to a type error.
func IsTypeErr(err error) bool {
if err == nil {
return false
}
return strings.HasPrefix(err.Error(), "Expected type")
}


@ -1,58 +0,0 @@
package gorethink
import (
"io/ioutil"
"reflect"
"github.com/Sirupsen/logrus"
"gopkg.in/gorethink/gorethink.v2/encoding"
)
var (
Log *logrus.Logger
)
const (
SystemDatabase = "rethinkdb"
TableConfigSystemTable = "table_config"
ServerConfigSystemTable = "server_config"
DBConfigSystemTable = "db_config"
ClusterConfigSystemTable = "cluster_config"
TableStatusSystemTable = "table_status"
ServerStatusSystemTable = "server_status"
CurrentIssuesSystemTable = "current_issues"
UsersSystemTable = "users"
PermissionsSystemTable = "permissions"
JobsSystemTable = "jobs"
StatsSystemTable = "stats"
LogsSystemTable = "logs"
)
func init() {
// Set encoding package
encoding.IgnoreType(reflect.TypeOf(Term{}))
Log = logrus.New()
Log.Out = ioutil.Discard // By default don't log anything
}
// SetVerbose allows the driver logging level to be set. If true is passed then
// the log level is set to Debug otherwise it defaults to Info.
func SetVerbose(verbose bool) {
if verbose {
Log.Level = logrus.DebugLevel
return
}
Log.Level = logrus.InfoLevel
}
// SetTags allows you to override the tags used when decoding or encoding
// structs. The driver will check for the tags in the same order that they were
// passed into this function. If no parameters are passed then the driver will
// default to checking for the gorethink tag (the gorethink tag is always included)
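//
// For example, a sketch that makes the driver check the json tag before
// falling back to the gorethink tag:
//
// r.SetTags("json")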
func SetTags(tags ...string) {
encoding.Tags = append(tags, "gorethink")
}


@ -1,24 +0,0 @@
package gorethink
import (
"fmt"
)
// Host name and port of server
type Host struct {
Name string
Port int
}
// NewHost creates a new Host.
func NewHost(name string, port int) Host {
return Host{
Name: name,
Port: port,
}
}
// String returns the host address (name:port).
func (h Host) String() string {
return fmt.Sprintf("%s:%d", h.Name, h.Port)
}


@ -1,394 +0,0 @@
package gorethink
import (
"encoding/json"
"fmt"
"reflect"
"sync"
"time"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// Mocking is based on the amazing package github.com/stretchr/testify
// testingT is an interface wrapper around *testing.T
type testingT interface {
Logf(format string, args ...interface{})
Errorf(format string, args ...interface{})
FailNow()
}
// MockAnything can be used in place of any term. This is useful when you want
// to mock similar queries or queries whose exact structure you don't quite know.
func MockAnything() Term {
t := constructRootTerm("MockAnything", p.Term_DATUM, nil, nil)
t.isMockAnything = true
return t
}
func (t Term) MockAnything() Term {
t = constructMethodTerm(t, "MockAnything", p.Term_DATUM, nil, nil)
t.isMockAnything = true
return t
}
// MockQuery represents a mocked query and is used for setting expectations,
// as well as recording activity.
type MockQuery struct {
parent *Mock
// Holds the query and term
Query Query
// Holds the JSON representation of query
BuiltQuery []byte
// Holds the response that should be returned when this method is executed.
Response interface{}
// Holds the error that should be returned when this method is executed.
Error error
// The number of times to return the return arguments when setting
// expectations. 0 means to always return the value.
Repeatability int
// Holds a channel that will be used to block the Return until it either
// receives a message or is closed. nil means it returns immediately.
WaitFor <-chan time.Time
// Amount of times this query has been executed
executed int
}
func newMockQuery(parent *Mock, q Query) *MockQuery {
// Build and marshal term
builtQuery, err := json.Marshal(q.Build())
if err != nil {
panic(fmt.Sprintf("Failed to build query: %s", err))
}
return &MockQuery{
parent: parent,
Query: q,
BuiltQuery: builtQuery,
Response: make([]interface{}, 0),
Repeatability: 0,
WaitFor: nil,
}
}
func newMockQueryFromTerm(parent *Mock, t Term, opts map[string]interface{}) *MockQuery {
q, err := parent.newQuery(t, opts)
if err != nil {
panic(fmt.Sprintf("Failed to build query: %s", err))
}
return newMockQuery(parent, q)
}
func (mq *MockQuery) lock() {
mq.parent.mu.Lock()
}
func (mq *MockQuery) unlock() {
mq.parent.mu.Unlock()
}
// Return specifies the return arguments for the expectation.
//
// mock.On(r.Table("test")).Return(nil, errors.New("failed"))
func (mq *MockQuery) Return(response interface{}, err error) *MockQuery {
mq.lock()
defer mq.unlock()
mq.Response = response
mq.Error = err
return mq
}
// Once indicates that the mock should only return the value once.
//
// mock.On(r.Table("test")).Return(result, nil).Once()
func (mq *MockQuery) Once() *MockQuery {
return mq.Times(1)
}
// Twice indicates that the mock should only return the value twice.
//
// mock.On(r.Table("test")).Return(result, nil).Twice()
func (mq *MockQuery) Twice() *MockQuery {
return mq.Times(2)
}
// Times indicates that the mock should only return the indicated number
// of times.
//
// mock.On(r.Table("test")).Return(result, nil).Times(5)
func (mq *MockQuery) Times(i int) *MockQuery {
mq.lock()
defer mq.unlock()
mq.Repeatability = i
return mq
}
// WaitUntil sets the channel that will block the mock's return until it is closed
// or a message is received.
//
// mock.On(r.Table("test")).WaitUntil(time.After(time.Second))
func (mq *MockQuery) WaitUntil(w <-chan time.Time) *MockQuery {
mq.lock()
defer mq.unlock()
mq.WaitFor = w
return mq
}
// After sets how long to block until the query returns
//
// mock.On(r.Table("test")).After(time.Second)
func (mq *MockQuery) After(d time.Duration) *MockQuery {
return mq.WaitUntil(time.After(d))
}
// On chains a new expectation description onto the mocked interface. This
// allows syntax like:
//
// Mock.
// On(r.Table("test")).Return(result, nil).
// On(r.Table("test2")).Return(nil, errors.New("Some Error"))
func (mq *MockQuery) On(t Term) *MockQuery {
return mq.parent.On(t)
}
// Mock is used to mock query execution and verify that the expected queries are
// being executed. Mocks are used by creating an instance using NewMock and then
// passing this when running your queries instead of a session. For example:
//
// mock := r.NewMock()
// mock.On(r.Table("test")).Return([]interface{}{data}, nil)
//
// cursor, err := r.Table("test").Run(mock)
//
// mock.AssertExpectations(t)
type Mock struct {
mu sync.Mutex
opts ConnectOpts
ExpectedQueries []*MockQuery
Queries []MockQuery
}
// NewMock creates an instance of Mock. You can optionally pass ConnectOpts to
// the function; if passed, any mocked query will be generated using those
// options.
func NewMock(opts ...ConnectOpts) *Mock {
m := &Mock{
ExpectedQueries: make([]*MockQuery, 0),
Queries: make([]MockQuery, 0),
}
if len(opts) > 0 {
m.opts = opts[0]
}
return m
}
// On starts a description of an expectation of the specified query
// being executed.
//
// mock.On(r.Table("test"))
func (m *Mock) On(t Term, opts ...map[string]interface{}) *MockQuery {
var qopts map[string]interface{}
if len(opts) > 0 {
qopts = opts[0]
}
m.mu.Lock()
defer m.mu.Unlock()
mq := newMockQueryFromTerm(m, t, qopts)
m.ExpectedQueries = append(m.ExpectedQueries, mq)
return mq
}
// AssertExpectations asserts that everything specified with On and Return was
// in fact executed as expected. Queries may have been executed in any order.
func (m *Mock) AssertExpectations(t testingT) bool {
var somethingMissing bool
var failedExpectations int
// iterate through each expectation
expectedQueries := m.expectedQueries()
for _, expectedQuery := range expectedQueries {
if !m.queryWasExecuted(expectedQuery) && expectedQuery.executed == 0 {
somethingMissing = true
failedExpectations++
t.Logf("❌\t%s", expectedQuery.Query.Term.String())
} else {
m.mu.Lock()
if expectedQuery.Repeatability > 0 {
somethingMissing = true
failedExpectations++
} else {
t.Logf("✅\t%s", expectedQuery.Query.Term.String())
}
m.mu.Unlock()
}
}
if somethingMissing {
t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe query you are testing needs to be executed %d more times(s).", len(expectedQueries)-failedExpectations, len(expectedQueries), failedExpectations)
}
return !somethingMissing
}
// AssertNumberOfExecutions asserts that the query was executed expectedExecutions times.
func (m *Mock) AssertNumberOfExecutions(t testingT, expectedQuery *MockQuery, expectedExecutions int) bool {
var actualExecutions int
for _, query := range m.queries() {
if query.Query.Term.compare(*expectedQuery.Query.Term, map[int64]int64{}) && query.Repeatability > -1 {
// if bytes.Equal(query.BuiltQuery, expectedQuery.BuiltQuery) {
actualExecutions++
}
}
if expectedExecutions != actualExecutions {
t.Errorf("Expected number of executions (%d) does not match the actual number of executions (%d).", expectedExecutions, actualExecutions)
return false
}
return true
}
// AssertExecuted asserts that the method was executed.
// It can produce a false result when an argument is a pointer type and the underlying value changed after executing the mocked method.
func (m *Mock) AssertExecuted(t testingT, expectedQuery *MockQuery) bool {
if !m.queryWasExecuted(expectedQuery) {
t.Errorf("The query \"%s\" should have been executed, but was not.", expectedQuery.Query.Term.String())
return false
}
return true
}
// AssertNotExecuted asserts that the method was not executed.
// It can produce a false result when an argument is a pointer type and the underlying value changed after executing the mocked method.
func (m *Mock) AssertNotExecuted(t testingT, expectedQuery *MockQuery) bool {
if m.queryWasExecuted(expectedQuery) {
t.Errorf("The query \"%s\" was executed, but should NOT have been.", expectedQuery.Query.Term.String())
return false
}
return true
}
func (m *Mock) IsConnected() bool {
return true
}
func (m *Mock) Query(q Query) (*Cursor, error) {
found, query := m.findExpectedQuery(q)
if found < 0 {
panic(fmt.Sprintf("gorethink: mock: This query was unexpected:\n\t\t%s", q.Term.String()))
} else {
m.mu.Lock()
switch {
case query.Repeatability == 1:
query.Repeatability = -1
query.executed++
case query.Repeatability > 1:
query.Repeatability--
query.executed++
case query.Repeatability == 0:
query.executed++
}
m.mu.Unlock()
}
// add the query
m.mu.Lock()
m.Queries = append(m.Queries, *newMockQuery(m, q))
m.mu.Unlock()
// block if specified
if query.WaitFor != nil {
<-query.WaitFor
}
// Return error without building cursor if non-nil
if query.Error != nil {
return nil, query.Error
}
// Build cursor and return
c := newCursor(nil, "", query.Query.Token, query.Query.Term, query.Query.Opts)
c.finished = true
c.fetching = false
c.isAtom = true
responseVal := reflect.ValueOf(query.Response)
if responseVal.Kind() == reflect.Slice || responseVal.Kind() == reflect.Array {
for i := 0; i < responseVal.Len(); i++ {
c.buffer = append(c.buffer, responseVal.Index(i).Interface())
}
} else {
c.buffer = append(c.buffer, query.Response)
}
return c, nil
}
func (m *Mock) Exec(q Query) error {
_, err := m.Query(q)
return err
}
func (m *Mock) newQuery(t Term, opts map[string]interface{}) (Query, error) {
return newQuery(t, opts, &m.opts)
}
func (m *Mock) findExpectedQuery(q Query) (int, *MockQuery) {
m.mu.Lock()
defer m.mu.Unlock()
for i, query := range m.ExpectedQueries {
// if bytes.Equal(query.BuiltQuery, builtQuery) && query.Repeatability > -1 {
if query.Query.Term.compare(*q.Term, map[int64]int64{}) && query.Repeatability > -1 {
return i, query
}
}
return -1, nil
}
func (m *Mock) queryWasExecuted(expectedQuery *MockQuery) bool {
for _, query := range m.queries() {
if query.Query.Term.compare(*expectedQuery.Query.Term, map[int64]int64{}) {
// if bytes.Equal(query.BuiltQuery, expectedQuery.BuiltQuery) {
return true
}
}
// we didn't find the expected query
return false
}
func (m *Mock) expectedQueries() []*MockQuery {
m.mu.Lock()
defer m.mu.Unlock()
return append([]*MockQuery{}, m.ExpectedQueries...)
}
func (m *Mock) queries() []MockQuery {
m.mu.Lock()
defer m.mu.Unlock()
return append([]MockQuery{}, m.Queries...)
}


@ -1,133 +0,0 @@
package gorethink
import (
"sync"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// Node represents a database server in the cluster
type Node struct {
ID string
Host Host
aliases []Host
cluster *Cluster
pool *Pool
mu sync.RWMutex
closed bool
}
func newNode(id string, aliases []Host, cluster *Cluster, pool *Pool) *Node {
node := &Node{
ID: id,
Host: aliases[0],
aliases: aliases,
cluster: cluster,
pool: pool,
}
return node
}
// Closed returns true if the node is closed
func (n *Node) Closed() bool {
n.mu.RLock()
defer n.mu.RUnlock()
return n.closed
}
// Close closes the node and its connection pool
func (n *Node) Close(optArgs ...CloseOpts) error {
n.mu.Lock()
defer n.mu.Unlock()
if n.closed {
return nil
}
if len(optArgs) >= 1 {
if optArgs[0].NoReplyWait {
n.NoReplyWait()
}
}
if n.pool != nil {
n.pool.Close()
}
n.pool = nil
n.closed = true
return nil
}
// SetInitialPoolCap sets the initial capacity of the connection pool.
func (n *Node) SetInitialPoolCap(idleConns int) {
n.pool.SetInitialPoolCap(idleConns)
}
// SetMaxIdleConns sets the maximum number of connections in the idle
// connection pool.
func (n *Node) SetMaxIdleConns(idleConns int) {
n.pool.SetMaxIdleConns(idleConns)
}
// SetMaxOpenConns sets the maximum number of open connections to the database.
func (n *Node) SetMaxOpenConns(openConns int) {
n.pool.SetMaxOpenConns(openConns)
}
// NoReplyWait ensures that previous queries with the noreply flag have been
// processed by the server. Note that this guarantee only applies to queries
// run on the given connection
func (n *Node) NoReplyWait() error {
return n.pool.Exec(Query{
Type: p.Query_NOREPLY_WAIT,
})
}
// Query executes a ReQL query using this node's connection pool.
func (n *Node) Query(q Query) (cursor *Cursor, err error) {
if n.Closed() {
return nil, ErrInvalidNode
}
return n.pool.Query(q)
}
// Exec executes a ReQL query using this node's connection pool.
func (n *Node) Exec(q Query) (err error) {
if n.Closed() {
return ErrInvalidNode
}
return n.pool.Exec(q)
}
// Server returns the server name and server UUID being used by a connection.
func (n *Node) Server() (ServerResponse, error) {
var response ServerResponse
if n.Closed() {
return response, ErrInvalidNode
}
return n.pool.Server()
}
type nodeStatus struct {
ID string `gorethink:"id"`
Name string `gorethink:"name"`
Status string `gorethink:"status"`
Network struct {
Hostname string `gorethink:"hostname"`
ClusterPort int64 `gorethink:"cluster_port"`
ReqlPort int64 `gorethink:"reql_port"`
CanonicalAddresses []struct {
Host string `gorethink:"host"`
Port int64 `gorethink:"port"`
} `gorethink:"canonical_addresses"`
} `gorethink:"network"`
}


@ -1,200 +0,0 @@
package gorethink
import (
"errors"
"fmt"
"net"
"sync"
"gopkg.in/fatih/pool.v2"
)
var (
errPoolClosed = errors.New("gorethink: pool is closed")
)
// A Pool is used to store a pool of connections to a single RethinkDB server
type Pool struct {
host Host
opts *ConnectOpts
pool pool.Pool
mu sync.RWMutex // protects following fields
closed bool
}
// NewPool creates a new connection pool for the given host
func NewPool(host Host, opts *ConnectOpts) (*Pool, error) {
initialCap := opts.InitialCap
if initialCap <= 0 {
// Fall back to MaxIdle if InitialCap is zero; this should be removed
// when MaxIdle is removed
initialCap = opts.MaxIdle
}
maxOpen := opts.MaxOpen
if maxOpen <= 0 {
maxOpen = 2
}
p, err := pool.NewChannelPool(initialCap, maxOpen, func() (net.Conn, error) {
conn, err := NewConnection(host.String(), opts)
if err != nil {
return nil, err
}
return conn, err
})
if err != nil {
return nil, err
}
return &Pool{
pool: p,
host: host,
opts: opts,
}, nil
}
// Ping verifies a connection to the database is still alive,
// establishing a connection if necessary.
func (p *Pool) Ping() error {
_, pc, err := p.conn()
if err != nil {
return err
}
return pc.Close()
}
// Close closes the database, releasing any open resources.
//
// It is rare to Close a Pool, as the Pool handle is meant to be
// long-lived and shared between many goroutines.
func (p *Pool) Close() error {
p.mu.RLock()
defer p.mu.RUnlock()
if p.closed {
return nil
}
p.pool.Close()
return nil
}
func (p *Pool) conn() (*Connection, *pool.PoolConn, error) {
p.mu.RLock()
defer p.mu.RUnlock()
if p.closed {
return nil, nil, errPoolClosed
}
nc, err := p.pool.Get()
if err != nil {
return nil, nil, err
}
pc, ok := nc.(*pool.PoolConn)
if !ok {
// This should never happen!
return nil, nil, fmt.Errorf("Invalid connection in pool")
}
conn, ok := pc.Conn.(*Connection)
if !ok {
// This should never happen!
return nil, nil, fmt.Errorf("Invalid connection in pool")
}
return conn, pc, nil
}
// SetInitialPoolCap sets the initial capacity of the connection pool.
//
// Deprecated: This value should only be set when connecting
func (p *Pool) SetInitialPoolCap(n int) {
return
}
// SetMaxIdleConns sets the maximum number of connections in the idle
// connection pool.
//
// Deprecated: This value should only be set when connecting
func (p *Pool) SetMaxIdleConns(n int) {
return
}
// SetMaxOpenConns sets the maximum number of open connections to the database.
//
// Deprecated: This value should only be set when connecting
func (p *Pool) SetMaxOpenConns(n int) {
return
}
// Query execution functions
// Exec executes a query without waiting for any response.
func (p *Pool) Exec(q Query) error {
c, pc, err := p.conn()
if err != nil {
return err
}
defer pc.Close()
_, _, err = c.Query(q)
if c.isBad() {
pc.MarkUnusable()
}
return err
}
// Query executes a query and waits for the response
func (p *Pool) Query(q Query) (*Cursor, error) {
c, pc, err := p.conn()
if err != nil {
return nil, err
}
_, cursor, err := c.Query(q)
if err == nil {
cursor.releaseConn = releaseConn(c, pc)
} else if c.isBad() {
pc.MarkUnusable()
}
return cursor, err
}
// Server returns the server name and server UUID being used by a connection.
func (p *Pool) Server() (ServerResponse, error) {
var response ServerResponse
c, pc, err := p.conn()
if err != nil {
return response, err
}
defer pc.Close()
response, err = c.Server()
if c.isBad() {
pc.MarkUnusable()
}
return response, err
}
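// releaseConn returns a function that releases the pooled connection, marking
// it unusable first if the underlying connection has gone bad so the pool can
// discard it instead of reusing it.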
func releaseConn(c *Connection, pc *pool.PoolConn) func() error {
return func() error {
if c.isBad() {
pc.MarkUnusable()
}
return pc.Close()
}
}


@ -1,235 +0,0 @@
package gorethink
import (
"encoding/base64"
"math"
"strconv"
"time"
"gopkg.in/gorethink/gorethink.v2/types"
"fmt"
)
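// convertPseudotype converts a ReQL pseudo-type document (one with a
// "$reql_type$" key of TIME, GROUPED_DATA, BINARY or GEOMETRY) into its native
// Go representation, honouring the corresponding time_format, group_format,
// binary_format and geometry_format run options.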
func convertPseudotype(obj map[string]interface{}, opts map[string]interface{}) (interface{}, error) {
if reqlType, ok := obj["$reql_type$"]; ok {
if reqlType == "TIME" {
// load timeFormat, set to native if the option was not set
timeFormat := "native"
if opt, ok := opts["time_format"]; ok {
if sopt, ok := opt.(string); ok {
timeFormat = sopt
} else {
return nil, fmt.Errorf("Invalid time_format run option \"%s\".", opt)
}
}
if timeFormat == "native" {
return reqlTimeToNativeTime(obj["epoch_time"].(float64), obj["timezone"].(string))
} else if timeFormat == "raw" {
return obj, nil
} else {
return nil, fmt.Errorf("Unknown time_format run option \"%s\".", reqlType)
}
} else if reqlType == "GROUPED_DATA" {
// load groupFormat, set to native if the option was not set
groupFormat := "native"
if opt, ok := opts["group_format"]; ok {
if sopt, ok := opt.(string); ok {
groupFormat = sopt
} else {
return nil, fmt.Errorf("Invalid group_format run option \"%s\".", opt)
}
}
if groupFormat == "native" || groupFormat == "slice" {
return reqlGroupedDataToSlice(obj)
} else if groupFormat == "map" {
return reqlGroupedDataToMap(obj)
} else if groupFormat == "raw" {
return obj, nil
} else {
return nil, fmt.Errorf("Unknown group_format run option \"%s\".", reqlType)
}
} else if reqlType == "BINARY" {
binaryFormat := "native"
if opt, ok := opts["binary_format"]; ok {
if sopt, ok := opt.(string); ok {
binaryFormat = sopt
} else {
return nil, fmt.Errorf("Invalid binary_format run option \"%s\".", opt)
}
}
if binaryFormat == "native" {
return reqlBinaryToNativeBytes(obj)
} else if binaryFormat == "raw" {
return obj, nil
} else {
return nil, fmt.Errorf("Unknown binary_format run option \"%s\".", reqlType)
}
} else if reqlType == "GEOMETRY" {
geometryFormat := "native"
if opt, ok := opts["geometry_format"]; ok {
if sopt, ok := opt.(string); ok {
geometryFormat = sopt
} else {
return nil, fmt.Errorf("Invalid geometry_format run option \"%s\".", opt)
}
}
if geometryFormat == "native" {
return reqlGeometryToNativeGeometry(obj)
} else if geometryFormat == "raw" {
return obj, nil
} else {
return nil, fmt.Errorf("Unknown geometry_format run option \"%s\".", reqlType)
}
} else {
return obj, nil
}
}
return obj, nil
}
func recursivelyConvertPseudotype(obj interface{}, opts map[string]interface{}) (interface{}, error) {
var err error
switch obj := obj.(type) {
case []interface{}:
for key, val := range obj {
obj[key], err = recursivelyConvertPseudotype(val, opts)
if err != nil {
return nil, err
}
}
case map[string]interface{}:
for key, val := range obj {
obj[key], err = recursivelyConvertPseudotype(val, opts)
if err != nil {
return nil, err
}
}
pobj, err := convertPseudotype(obj, opts)
if err != nil {
return nil, err
}
return pobj, nil
}
return obj, nil
}
// Pseudo-type helper functions
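// reqlTimeToNativeTime converts a ReQL epoch timestamp (seconds with a
// fractional millisecond part) and a timezone string of the form "+HH:MM" or
// "-HH:MM" into a time.Time in that fixed zone.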
func reqlTimeToNativeTime(timestamp float64, timezone string) (time.Time, error) {
sec, ms := math.Modf(timestamp)
// Convert to native time rounding to milliseconds
t := time.Unix(int64(sec), int64(math.Floor(ms*1000+0.5))*1000*1000)
// Calculate the timezone offset
if timezone != "" {
hours, err := strconv.Atoi(timezone[1:3])
if err != nil {
return time.Time{}, err
}
minutes, err := strconv.Atoi(timezone[4:6])
if err != nil {
return time.Time{}, err
}
tzOffset := ((hours * 60) + minutes) * 60
if timezone[:1] == "-" {
tzOffset = 0 - tzOffset
}
t = t.In(time.FixedZone(timezone, tzOffset))
}
return t, nil
}
func reqlGroupedDataToSlice(obj map[string]interface{}) (interface{}, error) {
if data, ok := obj["data"]; ok {
ret := []interface{}{}
for _, v := range data.([]interface{}) {
v := v.([]interface{})
ret = append(ret, map[string]interface{}{
"group": v[0],
"reduction": v[1],
})
}
return ret, nil
}
return nil, fmt.Errorf("pseudo-type GROUPED_DATA object %v does not have the expected field \"data\"", obj)
}
func reqlGroupedDataToMap(obj map[string]interface{}) (interface{}, error) {
if data, ok := obj["data"]; ok {
ret := map[interface{}]interface{}{}
for _, v := range data.([]interface{}) {
v := v.([]interface{})
ret[v[0]] = v[1]
}
return ret, nil
}
return nil, fmt.Errorf("pseudo-type GROUPED_DATA object %v does not have the expected field \"data\"", obj)
}
func reqlBinaryToNativeBytes(obj map[string]interface{}) (interface{}, error) {
if data, ok := obj["data"]; ok {
if data, ok := data.(string); ok {
b, err := base64.StdEncoding.DecodeString(data)
if err != nil {
return nil, fmt.Errorf("error decoding pseudo-type BINARY object %v", obj)
}
return b, nil
}
return nil, fmt.Errorf("pseudo-type BINARY object %v field \"data\" is not valid", obj)
}
return nil, fmt.Errorf("pseudo-type BINARY object %v does not have the expected field \"data\"", obj)
}
func reqlGeometryToNativeGeometry(obj map[string]interface{}) (interface{}, error) {
if typ, ok := obj["type"]; !ok {
return nil, fmt.Errorf("pseudo-type GEOMETRY object %v does not have the expected field \"type\"", obj)
} else if typ, ok := typ.(string); !ok {
return nil, fmt.Errorf("pseudo-type GEOMETRY object %v field \"type\" is not valid", obj)
} else if coords, ok := obj["coordinates"]; !ok {
return nil, fmt.Errorf("pseudo-type GEOMETRY object %v does not have the expected field \"coordinates\"", obj)
} else if typ == "Point" {
point, err := types.UnmarshalPoint(coords)
if err != nil {
return nil, err
}
return types.Geometry{
Type: "Point",
Point: point,
}, nil
} else if typ == "LineString" {
line, err := types.UnmarshalLineString(coords)
if err != nil {
return nil, err
}
return types.Geometry{
Type: "LineString",
Line: line,
}, nil
} else if typ == "Polygon" {
lines, err := types.UnmarshalPolygon(coords)
if err != nil {
return nil, err
}
return types.Geometry{
Type: "Polygon",
Lines: lines,
}, nil
} else {
return nil, fmt.Errorf("pseudo-type GEOMETRY object %v field has unknown type %s", obj, typ)
}
}


@ -1,455 +0,0 @@
package gorethink
import (
"fmt"
"reflect"
"strconv"
"strings"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// A Query represents a query ready to be sent to the database. A Query differs
// from a Term in that it contains both a query type and a token. These values
// are used by the database to determine whether the query is continuing a
// previous request, and they also allow the driver to match responses to
// queries, as responses can arrive out of order.
type Query struct {
Type p.Query_QueryType
Token int64
Term *Term
Opts map[string]interface{}
builtTerm interface{}
}
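// Build returns the query in the wire format sent to the server: a JSON array
// containing the query type, the built term (if any) and any global options,
// with gorethink-only options such as geometry_format stripped out.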
func (q *Query) Build() []interface{} {
res := []interface{}{int(q.Type)}
if q.Term != nil {
res = append(res, q.builtTerm)
}
if len(q.Opts) > 0 {
// Clone opts and remove custom gorethink options
opts := map[string]interface{}{}
for k, v := range q.Opts {
switch k {
case "geometry_format":
default:
opts[k] = v
}
}
res = append(res, opts)
}
return res
}
type termsList []Term
type termsObj map[string]Term
// A Term represents a query that is being built. Terms consist of an array of
// "sub-terms" and a term type. When a Term is a sub-term the first element of
// the terms data is its parent Term.
//
// When built the term becomes a JSON array, for more information on the format
// see http://rethinkdb.com/docs/writing-drivers/.
type Term struct {
name string
rawQuery bool
rootTerm bool
termType p.Term_TermType
data interface{}
args []Term
optArgs map[string]Term
lastErr error
isMockAnything bool
}
func (t Term) compare(t2 Term, varMap map[int64]int64) bool {
if t.isMockAnything || t2.isMockAnything {
return true
}
if t.name != t2.name ||
t.rawQuery != t2.rawQuery ||
t.rootTerm != t2.rootTerm ||
t.termType != t2.termType ||
!reflect.DeepEqual(t.data, t2.data) ||
len(t.args) != len(t2.args) ||
len(t.optArgs) != len(t2.optArgs) {
return false
}
for i, v := range t.args {
if t.termType == p.Term_FUNC && t2.termType == p.Term_FUNC && i == 0 {
// Functions need to be compared differently, as each variable
// will have a different var ID, so first try to create a mapping
// between the two sets of IDs
argsArr := t.args[0].args
argsArr2 := t2.args[0].args
if len(argsArr) != len(argsArr2) {
return false
}
for j := 0; j < len(argsArr); j++ {
varMap[argsArr[j].data.(int64)] = argsArr2[j].data.(int64)
}
} else if t.termType == p.Term_VAR && t2.termType == p.Term_VAR && i == 0 {
// When comparing vars use our var map
v1 := t.args[i].data.(int64)
v2 := t2.args[i].data.(int64)
if varMap[v1] != v2 {
return false
}
} else if !v.compare(t2.args[i], varMap) {
return false
}
}
for k, v := range t.optArgs {
if _, ok := t2.optArgs[k]; !ok {
return false
}
if !v.compare(t2.optArgs[k], varMap) {
return false
}
}
return true
}
// Build takes the query tree and prepares it to be sent as a JSON
// expression.
func (t Term) Build() (interface{}, error) {
var err error
if t.lastErr != nil {
return nil, t.lastErr
}
if t.rawQuery {
return t.data, nil
}
switch t.termType {
case p.Term_DATUM:
return t.data, nil
case p.Term_MAKE_OBJ:
res := map[string]interface{}{}
for k, v := range t.optArgs {
res[k], err = v.Build()
if err != nil {
return nil, err
}
}
return res, nil
case p.Term_BINARY:
if len(t.args) == 0 {
return map[string]interface{}{
"$reql_type$": "BINARY",
"data": t.data,
}, nil
}
}
args := make([]interface{}, len(t.args))
optArgs := make(map[string]interface{}, len(t.optArgs))
for i, v := range t.args {
arg, err := v.Build()
if err != nil {
return nil, err
}
args[i] = arg
}
for k, v := range t.optArgs {
optArgs[k], err = v.Build()
if err != nil {
return nil, err
}
}
ret := []interface{}{int(t.termType)}
if len(args) > 0 {
ret = append(ret, args)
}
if len(optArgs) > 0 {
ret = append(ret, optArgs)
}
return ret, nil
}
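// Note (added for illustration, not part of the original driver source): a
// successfully built term is encoded as nested arrays of the shape
// [TERM_TYPE, [args...], {optArgs...}]; the args and optArgs elements are
// omitted when empty, and DATUM terms are emitted as their raw values.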
// String returns a string representation of the query tree
func (t Term) String() string {
if t.isMockAnything {
return "r.MockAnything()"
}
switch t.termType {
case p.Term_MAKE_ARRAY:
return fmt.Sprintf("[%s]", strings.Join(argsToStringSlice(t.args), ", "))
case p.Term_MAKE_OBJ:
return fmt.Sprintf("{%s}", strings.Join(optArgsToStringSlice(t.optArgs), ", "))
case p.Term_FUNC:
// Get string representation of each argument
args := []string{}
for _, v := range t.args[0].args {
args = append(args, fmt.Sprintf("var_%d", v.data))
}
return fmt.Sprintf("func(%s r.Term) r.Term { return %s }",
strings.Join(args, ", "),
t.args[1].String(),
)
case p.Term_VAR:
return fmt.Sprintf("var_%s", t.args[0])
case p.Term_IMPLICIT_VAR:
return "r.Row"
case p.Term_DATUM:
switch v := t.data.(type) {
case string:
return strconv.Quote(v)
default:
return fmt.Sprintf("%v", v)
}
case p.Term_BINARY:
if len(t.args) == 0 {
return fmt.Sprintf("r.binary(<data>)")
}
}
if t.rootTerm {
return fmt.Sprintf("r.%s(%s)", t.name, strings.Join(allArgsToStringSlice(t.args, t.optArgs), ", "))
}
if t.args == nil {
return "r"
}
return fmt.Sprintf("%s.%s(%s)", t.args[0].String(), t.name, strings.Join(allArgsToStringSlice(t.args[1:], t.optArgs), ", "))
}
// OptArgs is an interface used to represent a term's optional arguments. All
// optional argument types have a toMap function; the returned map can be encoded
// and sent as part of the query.
type OptArgs interface {
toMap() map[string]interface{}
}
func (t Term) OptArgs(args interface{}) Term {
switch args := args.(type) {
case OptArgs:
t.optArgs = convertTermObj(args.toMap())
case map[string]interface{}:
t.optArgs = convertTermObj(args)
}
return t
}
type QueryExecutor interface {
IsConnected() bool
Query(Query) (*Cursor, error)
Exec(Query) error
newQuery(t Term, opts map[string]interface{}) (Query, error)
}
// WriteResponse is a helper type used when dealing with the response of a
// write query. It is also returned by the RunWrite function.
type WriteResponse struct {
Errors int `gorethink:"errors"`
Inserted int `gorethink:"inserted"`
Updated int `gorethink:"updated"`
Unchanged int `gorethink:"unchanged"`
Replaced int `gorethink:"replaced"`
Renamed int `gorethink:"renamed"`
Skipped int `gorethink:"skipped"`
Deleted int `gorethink:"deleted"`
Created int `gorethink:"created"`
DBsCreated int `gorethink:"dbs_created"`
TablesCreated int `gorethink:"tables_created"`
Dropped int `gorethink:"dropped"`
DBsDropped int `gorethink:"dbs_dropped"`
TablesDropped int `gorethink:"tables_dropped"`
GeneratedKeys []string `gorethink:"generated_keys"`
FirstError string `gorethink:"first_error"` // populated if Errors > 0
ConfigChanges []ChangeResponse `gorethink:"config_changes"`
Changes []ChangeResponse
}
// ChangeResponse is a helper type used when dealing with changefeeds. The type
// contains both the value before the query and the new value.
type ChangeResponse struct {
NewValue interface{} `gorethink:"new_val,omitempty"`
OldValue interface{} `gorethink:"old_val,omitempty"`
State string `gorethink:"state,omitempty"`
Error string `gorethink:"error,omitempty"`
}
// RunOpts contains the optional arguments for the Run function.
type RunOpts struct {
DB interface{} `gorethink:"db,omitempty"`
Db interface{} `gorethink:"db,omitempty"` // Deprecated
Profile interface{} `gorethink:"profile,omitempty"`
Durability interface{} `gorethink:"durability,omitempty"`
UseOutdated interface{} `gorethink:"use_outdated,omitempty"` // Deprecated
ArrayLimit interface{} `gorethink:"array_limit,omitempty"`
TimeFormat interface{} `gorethink:"time_format,omitempty"`
GroupFormat interface{} `gorethink:"group_format,omitempty"`
BinaryFormat interface{} `gorethink:"binary_format,omitempty"`
GeometryFormat interface{} `gorethink:"geometry_format,omitempty"`
ReadMode interface{} `gorethink:"read_mode,omitempty"`
MinBatchRows interface{} `gorethink:"min_batch_rows,omitempty"`
MaxBatchRows interface{} `gorethink:"max_batch_rows,omitempty"`
MaxBatchBytes interface{} `gorethink:"max_batch_bytes,omitempty"`
MaxBatchSeconds interface{} `gorethink:"max_batch_seconds,omitempty"`
FirstBatchScaledownFactor interface{} `gorethink:"first_batch_scaledown_factor,omitempty"`
}
func (o RunOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Run runs a query using the given connection.
//
// rows, err := query.Run(sess)
// if err != nil {
// // error
// }
//
// var doc MyDocumentType
// for rows.Next(&doc) {
// // Do something with document
// }
func (t Term) Run(s QueryExecutor, optArgs ...RunOpts) (*Cursor, error) {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
if s == nil || !s.IsConnected() {
return nil, ErrConnectionClosed
}
q, err := s.newQuery(t, opts)
if err != nil {
return nil, err
}
return s.Query(q)
}
// RunWrite runs a query using the given connection but, unlike Run, automatically
// scans the result into a variable of type WriteResponse. This function should be
// used if you are running a write query (such as Insert, Update, TableCreate, etc...).
//
// If an error occurs when running the write query, the first error is returned.
//
// res, err := r.DB("database").Table("table").Insert(doc).RunWrite(sess)
func (t Term) RunWrite(s QueryExecutor, optArgs ...RunOpts) (WriteResponse, error) {
var response WriteResponse
res, err := t.Run(s, optArgs...)
if err != nil {
return response, err
}
defer res.Close()
if err = res.One(&response); err != nil {
return response, err
}
if response.Errors > 0 {
return response, fmt.Errorf("%s", response.FirstError)
}
return response, nil
}
// ReadOne is a shortcut method that runs the query on the given connection
// and reads one response from the cursor before closing it.
//
// It returns any errors encountered from running the query or reading the response.
func (t Term) ReadOne(dest interface{}, s QueryExecutor, optArgs ...RunOpts) error {
res, err := t.Run(s, optArgs...)
if err != nil {
return err
}
return res.One(dest)
}
// ReadAll is a shortcut method that runs the query on the given connection
// and reads all of the responses from the cursor before closing it.
//
// It returns any errors encountered from running the query or reading the responses.
func (t Term) ReadAll(dest interface{}, s QueryExecutor, optArgs ...RunOpts) error {
res, err := t.Run(s, optArgs...)
if err != nil {
return err
}
return res.All(dest)
}
// ExecOpts contains the optional arguments for the Exec function and inherits
// its options from RunOpts; the only difference is the addition of the NoReply
// field.
//
// When NoReply is true it causes the driver not to wait to receive the result
// and to return immediately.
type ExecOpts struct {
DB interface{} `gorethink:"db,omitempty"`
Db interface{} `gorethink:"db,omitempty"` // Deprecated
Profile interface{} `gorethink:"profile,omitempty"`
Durability interface{} `gorethink:"durability,omitempty"`
UseOutdated interface{} `gorethink:"use_outdated,omitempty"` // Deprecated
ArrayLimit interface{} `gorethink:"array_limit,omitempty"`
TimeFormat interface{} `gorethink:"time_format,omitempty"`
GroupFormat interface{} `gorethink:"group_format,omitempty"`
BinaryFormat interface{} `gorethink:"binary_format,omitempty"`
GeometryFormat interface{} `gorethink:"geometry_format,omitempty"`
MinBatchRows interface{} `gorethink:"min_batch_rows,omitempty"`
MaxBatchRows interface{} `gorethink:"max_batch_rows,omitempty"`
MaxBatchBytes interface{} `gorethink:"max_batch_bytes,omitempty"`
MaxBatchSeconds interface{} `gorethink:"max_batch_seconds,omitempty"`
FirstBatchScaledownFactor interface{} `gorethink:"first_batch_scaledown_factor,omitempty"`
NoReply interface{} `gorethink:"noreply,omitempty"`
}
func (o ExecOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Exec runs the query but does not return the result. Exec will still wait for
// the response to be received unless the NoReply field is true.
//
// err := r.DB("database").Table("table").Insert(doc).Exec(sess, r.ExecOpts{
// NoReply: true,
// })
func (t Term) Exec(s QueryExecutor, optArgs ...ExecOpts) error {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
if s == nil || !s.IsConnected() {
return ErrConnectionClosed
}
q, err := s.newQuery(t, opts)
if err != nil {
return err
}
return s.Exec(q)
}

View File

@ -1,85 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// Config can be used to read and/or update the configurations for individual
// tables or databases.
func (t Term) Config() Term {
return constructMethodTerm(t, "Config", p.Term_CONFIG, []interface{}{}, map[string]interface{}{})
}
// Rebalance rebalances the shards of a table. When called on a database, all
// the tables in that database will be rebalanced.
func (t Term) Rebalance() Term {
return constructMethodTerm(t, "Rebalance", p.Term_REBALANCE, []interface{}{}, map[string]interface{}{})
}
// ReconfigureOpts contains the optional arguments for the Reconfigure term.
type ReconfigureOpts struct {
Shards interface{} `gorethink:"shards,omitempty"`
Replicas interface{} `gorethink:"replicas,omitempty"`
DryRun interface{} `gorethink:"dry_run,omitempty"`
EmergencyRepair interface{} `gorethink:"emergency_repair,omitempty"`
NonVotingReplicaTags interface{} `gorethink:"nonvoting_replica_tags,omitempty"`
PrimaryReplicaTag interface{} `gorethink:"primary_replica_tag,omitempty"`
}
func (o ReconfigureOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Reconfigure a table's sharding and replication.
func (t Term) Reconfigure(optArgs ...ReconfigureOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Reconfigure", p.Term_RECONFIGURE, []interface{}{}, opts)
}
// Status returns the status of a table.
func (t Term) Status() Term {
return constructMethodTerm(t, "Status", p.Term_STATUS, []interface{}{}, map[string]interface{}{})
}
// WaitOpts contains the optional arguments for the Wait term.
type WaitOpts struct {
WaitFor interface{} `gorethink:"wait_for,omitempty"`
Timeout interface{} `gorethink:"timeout,omitempty"`
}
func (o WaitOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Wait for a table or all the tables in a database to be ready. A table may be
// temporarily unavailable after creation, rebalancing or reconfiguring. The
// wait command blocks until the given table (or database) is fully up to date.
//
// Deprecated: This function is not supported by RethinkDB 2.3 and above.
func Wait(optArgs ...WaitOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructRootTerm("Wait", p.Term_WAIT, []interface{}{}, opts)
}
// Wait for a table or all the tables in a database to be ready. A table may be
// temporarily unavailable after creation, rebalancing or reconfiguring. The
// wait command blocks until the given table (or database) is fully up to date.
func (t Term) Wait(optArgs ...WaitOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Wait", p.Term_WAIT, []interface{}{}, opts)
}
// Grant modifies access permissions for a user account, globally or on a
// per-database or per-table basis.
func (t Term) Grant(args ...interface{}) Term {
return constructMethodTerm(t, "Grant", p.Term_GRANT, args, map[string]interface{}{})
}

View File

@ -1,362 +0,0 @@
package gorethink
import p "gopkg.in/gorethink/gorethink.v2/ql2"
// Aggregation
// These commands are used to compute smaller values from large sequences.
// Reduce produces a single value from a sequence through repeated application
// of a reduction function.
//
// It takes one argument of type `func (r.Term, r.Term) interface{}`; for
// example, this query sums all elements in an array:
//
// r.Expr([]int{1,3,6}).Reduce(func (left, right r.Term) interface{} {
// return left.Add(right)
// })
func (t Term) Reduce(args ...interface{}) Term {
return constructMethodTerm(t, "Reduce", p.Term_REDUCE, funcWrapArgs(args), map[string]interface{}{})
}
// DistinctOpts contains the optional arguments for the Distinct term
type DistinctOpts struct {
Index interface{} `gorethink:"index,omitempty"`
}
func (o DistinctOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Distinct removes duplicate elements from the sequence.
func Distinct(arg interface{}, optArgs ...DistinctOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructRootTerm("Distinct", p.Term_DISTINCT, []interface{}{arg}, opts)
}
// Distinct removes duplicate elements from the sequence.
func (t Term) Distinct(optArgs ...DistinctOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Distinct", p.Term_DISTINCT, []interface{}{}, opts)
}
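// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess` and a table with a secondary index named
// "rating"):
//
//  r.Table("posts").Distinct(r.DistinctOpts{Index: "rating"}).Run(sess)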
// GroupOpts contains the optional arguments for the Group term
type GroupOpts struct {
Index interface{} `gorethink:"index,omitempty"`
Multi interface{} `gorethink:"multi,omitempty"`
}
func (o GroupOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Group takes a stream and partitions it into multiple groups based on the
// fields or functions provided. Commands chained after group will be
// called on each of these grouped sub-streams, producing grouped data.
func Group(fieldOrFunctions ...interface{}) Term {
return constructRootTerm("Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{})
}
// MultiGroup takes a stream and partitions it into multiple groups based on the
// fields or functions provided. Commands chained after group will be
// called on each of these grouped sub-streams, producing grouped data.
//
// Unlike Group, single documents can be assigned to multiple groups, similar
// to the behavior of multi-indexes. When the grouping value is an array, documents
// will be placed in each group that corresponds to the elements of the array. If
// the array is empty, the row will be ignored.
func MultiGroup(fieldOrFunctions ...interface{}) Term {
return constructRootTerm("Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
"multi": true,
})
}
// GroupByIndex takes a stream and partitions it into multiple groups based on the
// fields or functions provided. Commands chained after group will be
// called on each of these grouped sub-streams, producing grouped data.
func GroupByIndex(index interface{}, fieldOrFunctions ...interface{}) Term {
return constructRootTerm("Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
"index": index,
})
}
// MultiGroupByIndex takes a stream and partitions it into multiple groups based on the
// fields or functions provided. Commands chained after group will be
// called on each of these grouped sub-streams, producing grouped data.
//
// Unlike Group, single documents can be assigned to multiple groups, similar
// to the behavior of multi-indexes. When the grouping value is an array, documents
// will be placed in each group that corresponds to the elements of the array. If
// the array is empty, the row will be ignored.
func MultiGroupByIndex(index interface{}, fieldOrFunctions ...interface{}) Term {
return constructRootTerm("Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
"index": index,
"mutli": true,
})
}
// Group takes a stream and partitions it into multiple groups based on the
// fields or functions provided. Commands chained after group will be
// called on each of these grouped sub-streams, producing grouped data.
func (t Term) Group(fieldOrFunctions ...interface{}) Term {
return constructMethodTerm(t, "Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{})
}
// MultiGroup takes a stream and partitions it into multiple groups based on the
// fields or functions provided. Commands chained after group will be
// called on each of these grouped sub-streams, producing grouped data.
//
// Unlike Group, single documents can be assigned to multiple groups, similar
// to the behavior of multi-indexes. When the grouping value is an array, documents
// will be placed in each group that corresponds to the elements of the array. If
// the array is empty, the row will be ignored.
func (t Term) MultiGroup(fieldOrFunctions ...interface{}) Term {
return constructMethodTerm(t, "Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
"multi": true,
})
}
// GroupByIndex takes a stream and partitions it into multiple groups based on the
// fields or functions provided. Commands chained after group will be
// called on each of these grouped sub-streams, producing grouped data.
func (t Term) GroupByIndex(index interface{}, fieldOrFunctions ...interface{}) Term {
return constructMethodTerm(t, "Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
"index": index,
})
}
// MultiGroupByIndex takes a stream and partitions it into multiple groups based on the
// fields or functions provided. Commands chained after group will be
// called on each of these grouped sub-streams, producing grouped data.
//
// Unlike Group, single documents can be assigned to multiple groups, similar
// to the behavior of multi-indexes. When the grouping value is an array, documents
// will be placed in each group that corresponds to the elements of the array. If
// the array is empty, the row will be ignored.
func (t Term) MultiGroupByIndex(index interface{}, fieldOrFunctions ...interface{}) Term {
return constructMethodTerm(t, "Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
"index": index,
"mutli": true,
})
}
// Ungroup takes a grouped stream or grouped data and turns it into an array of
// objects representing the groups. Any commands chained after Ungroup will
// operate on this array, rather than operating on each group individually.
// This is useful if you want to e.g. order the groups by the value of their
// reduction.
func (t Term) Ungroup(args ...interface{}) Term {
return constructMethodTerm(t, "Ungroup", p.Term_UNGROUP, args, map[string]interface{}{})
}
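// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess` and a "games" table whose documents have
// "player" and "points" fields):
//
//  // Highest score per player, then flatten the grouped result into an array.
//  r.Table("games").Group("player").Max("points").Field("points").Ungroup().Run(sess)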
// Contains returns whether or not a sequence contains all the specified values,
// or if functions are provided instead, returns whether or not a sequence
// contains values matching all the specified functions.
func Contains(args ...interface{}) Term {
return constructRootTerm("Contains", p.Term_CONTAINS, funcWrapArgs(args), map[string]interface{}{})
}
// Contains returns whether or not a sequence contains all the specified values,
// or if functions are provided instead, returns whether or not a sequence
// contains values matching all the specified functions.
func (t Term) Contains(args ...interface{}) Term {
return constructMethodTerm(t, "Contains", p.Term_CONTAINS, funcWrapArgs(args), map[string]interface{}{})
}
// Aggregators
// These standard aggregator objects are to be used in conjunction with Group.
// Count the number of elements in the sequence. With a single argument,
// count the number of elements equal to it. If the argument is a function,
// it is equivalent to calling filter before count.
func Count(args ...interface{}) Term {
return constructRootTerm("Count", p.Term_COUNT, funcWrapArgs(args), map[string]interface{}{})
}
// Count the number of elements in the sequence. With a single argument,
// count the number of elements equal to it. If the argument is a function,
// it is equivalent to calling filter before count.
func (t Term) Count(args ...interface{}) Term {
return constructMethodTerm(t, "Count", p.Term_COUNT, funcWrapArgs(args), map[string]interface{}{})
}
// Sum returns the sum of all the elements of a sequence. If called with a field
// name, sums all the values of that field in the sequence, skipping elements of
// the sequence that lack that field. If called with a function, calls that
// function on every element of the sequence and sums the results, skipping
// elements of the sequence where that function returns null or a non-existence
// error.
func Sum(args ...interface{}) Term {
return constructRootTerm("Sum", p.Term_SUM, funcWrapArgs(args), map[string]interface{}{})
}
// Sum returns the sum of all the elements of a sequence. If called with a field
// name, sums all the values of that field in the sequence, skipping elements of
// the sequence that lack that field. If called with a function, calls that
// function on every element of the sequence and sums the results, skipping
// elements of the sequence where that function returns null or a non-existence
// error.
func (t Term) Sum(args ...interface{}) Term {
return constructMethodTerm(t, "Sum", p.Term_SUM, funcWrapArgs(args), map[string]interface{}{})
}
// Avg returns the average of all the elements of a sequence. If called with a field
// name, averages all the values of that field in the sequence, skipping elements of
// the sequence that lack that field. If called with a function, calls that function
// on every element of the sequence and averages the results, skipping elements of the
// sequence where that function returns null or a non-existence error.
func Avg(args ...interface{}) Term {
return constructRootTerm("Avg", p.Term_AVG, funcWrapArgs(args), map[string]interface{}{})
}
// Avg returns the average of all the elements of a sequence. If called with a field
// name, averages all the values of that field in the sequence, skipping elements of
// the sequence that lack that field. If called with a function, calls that function
// on every element of the sequence and averages the results, skipping elements of the
// sequence where that function returns null or a non-existence error.
func (t Term) Avg(args ...interface{}) Term {
return constructMethodTerm(t, "Avg", p.Term_AVG, funcWrapArgs(args), map[string]interface{}{})
}
// MinOpts contains the optional arguments for the Min term
type MinOpts struct {
Index interface{} `gorethink:"index,omitempty"`
}
func (o MinOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Min finds the minimum of a sequence. If called with a field name, finds the element
// of that sequence with the smallest value in that field. If called with a function,
// calls that function on every element of the sequence and returns the element
// which produced the smallest value, ignoring any elements where the function
// returns null or produces a non-existence error.
func Min(args ...interface{}) Term {
return constructRootTerm("Min", p.Term_MIN, funcWrapArgs(args), map[string]interface{}{})
}
// Min finds the minimum of a sequence. If called with a field name, finds the element
// of that sequence with the smallest value in that field. If called with a function,
// calls that function on every element of the sequence and returns the element
// which produced the smallest value, ignoring any elements where the function
// returns null or produces a non-existence error.
func (t Term) Min(args ...interface{}) Term {
return constructMethodTerm(t, "Min", p.Term_MIN, funcWrapArgs(args), map[string]interface{}{})
}
// MinIndex finds the minimum of a sequence. If called with a field name, finds the element
// of that sequence with the smallest value in that field. If called with a function,
// calls that function on every element of the sequence and returns the element
// which produced the smallest value, ignoring any elements where the function
// returns null or produces a non-existence error.
func MinIndex(index interface{}, args ...interface{}) Term {
return constructRootTerm("Min", p.Term_MIN, funcWrapArgs(args), map[string]interface{}{
"index": index,
})
}
// MinIndex finds the minimum of a sequence. If called with a field name, finds the element
// of that sequence with the smallest value in that field. If called with a function,
// calls that function on every element of the sequence and returns the element
// which produced the smallest value, ignoring any elements where the function
// returns null or produces a non-existence error.
func (t Term) MinIndex(index interface{}, args ...interface{}) Term {
return constructMethodTerm(t, "Min", p.Term_MIN, funcWrapArgs(args), map[string]interface{}{
"index": index,
})
}
// MaxOpts contains the optional arguments for the Max term
type MaxOpts struct {
Index interface{} `gorethink:"index,omitempty"`
}
func (o MaxOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Max finds the maximum of a sequence. If called with a field name, finds the element
// of that sequence with the largest value in that field. If called with a function,
// calls that function on every element of the sequence and returns the element
// which produced the largest value, ignoring any elements where the function
// returns null or produces a non-existence error.
func Max(args ...interface{}) Term {
return constructRootTerm("Max", p.Term_MAX, funcWrapArgs(args), map[string]interface{}{})
}
// Max finds the maximum of a sequence. If called with a field name, finds the element
// of that sequence with the largest value in that field. If called with a function,
// calls that function on every element of the sequence and returns the element
// which produced the largest value, ignoring any elements where the function
// returns null or produces a non-existence error.
func (t Term) Max(args ...interface{}) Term {
return constructMethodTerm(t, "Max", p.Term_MAX, funcWrapArgs(args), map[string]interface{}{})
}
// MaxIndex finds the maximum of a sequence. If called with a field name, finds the element
// of that sequence with the largest value in that field. If called with a function,
// calls that function on every element of the sequence and returns the element
// which produced the largest value, ignoring any elements where the function
// returns null or produces a non-existence error.
func MaxIndex(index interface{}, args ...interface{}) Term {
return constructRootTerm("Max", p.Term_MAX, funcWrapArgs(args), map[string]interface{}{
"index": index,
})
}
// MaxIndex finds the maximum of a sequence. If called with a field name, finds the element
// of that sequence with the largest value in that field. If called with a function,
// calls that function on every element of the sequence and returns the element
// which produced the largest value, ignoring any elements where the function
// returns null or produces a non-existence error.
func (t Term) MaxIndex(index interface{}, args ...interface{}) Term {
return constructMethodTerm(t, "Max", p.Term_MAX, funcWrapArgs(args), map[string]interface{}{
"index": index,
})
}
// FoldOpts contains the optional arguments for the Fold term
type FoldOpts struct {
Emit interface{} `gorethink:"emit,omitempty"`
FinalEmit interface{} `gorethink:"final_emit,omitempty"`
}
func (o FoldOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Fold applies a function to a sequence in order, maintaining state via an
// accumulator. The Fold command returns either a single value or a new sequence.
//
// In its first form, Fold operates like Reduce, returning a value by applying a
// combining function to each element in a sequence, passing the current element
// and the previous reduction result to the function. However, Fold has the
// following differences from Reduce:
// - it is guaranteed to proceed through the sequence from first element to last.
// - it passes an initial base value to the function with the first element in
// place of the previous reduction result.
//
// In its second form, Fold operates like ConcatMap, returning a new sequence
// rather than a single value. When an emit function is provided, Fold will:
// - proceed through the sequence in order and take an initial base value, as above.
// - for each element in the sequence, call both the combining function and a
// separate emitting function with the current element and previous reduction result.
// - optionally pass the result of the combining function to the emitting function.
//
// If provided, the emitting function must return a list.
func (t Term) Fold(base, fn interface{}, optArgs ...FoldOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
args := []interface{}{base, funcWrap(fn)}
return constructMethodTerm(t, "Fold", p.Term_FOLD, args, opts)
}
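// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess`):
//
//  // Sum a sequence while guaranteeing left-to-right evaluation order.
//  r.Expr([]int{1, 2, 3, 4}).Fold(0, func(acc, value r.Term) r.Term {
//      return acc.Add(value)
//  }).Run(sess)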

View File

@ -1,395 +0,0 @@
package gorethink
import (
"encoding/base64"
"encoding/json"
"reflect"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// Expr converts any value to an expression and is also used by many other terms
// such as Insert and Update. This function can convert the following basic Go
// types (bool, int, uint, string, float) and even pointers, maps and structs.
//
// When evaluating structs, they are encoded into a map before being sent to the
// server. Each exported field is added to the map unless
//
// - the field's tag is "-", or
// - the field is empty and its tag specifies the "omitempty" option.
//
// Each field's default name in the map is the field name, but it can be overridden
// via the struct field's tag value. The "gorethink" key in the struct field's
// tag value is the key name, followed by an optional comma and options. Examples:
//
// // Field is ignored by this package.
// Field int `gorethink:"-"`
// // Field appears as key "myName".
// Field int `gorethink:"myName"`
// // Field appears as key "myName" and
// // the field is omitted from the object if its value is empty,
// // as defined above.
// Field int `gorethink:"myName,omitempty"`
// // Field appears as key "Field" (the default), but
// // the field is skipped if empty.
// // Note the leading comma.
// Field int `gorethink:",omitempty"`
func Expr(val interface{}) Term {
if val == nil {
return Term{
termType: p.Term_DATUM,
data: nil,
}
}
switch val := val.(type) {
case Term:
return val
case []interface{}:
vals := make([]Term, len(val))
for i, v := range val {
vals[i] = Expr(v)
}
return makeArray(vals)
case map[string]interface{}:
vals := make(map[string]Term, len(val))
for k, v := range val {
vals[k] = Expr(v)
}
return makeObject(vals)
case
bool,
int,
int8,
int16,
int32,
int64,
uint,
uint8,
uint16,
uint32,
uint64,
float32,
float64,
uintptr,
string,
*bool,
*int,
*int8,
*int16,
*int32,
*int64,
*uint,
*uint8,
*uint16,
*uint32,
*uint64,
*float32,
*float64,
*uintptr,
*string:
return Term{
termType: p.Term_DATUM,
data: val,
}
default:
// Use reflection to check for other types
valType := reflect.TypeOf(val)
valValue := reflect.ValueOf(val)
switch valType.Kind() {
case reflect.Func:
return makeFunc(val)
case reflect.Struct, reflect.Map, reflect.Ptr:
data, err := encode(val)
if err != nil || data == nil {
return Term{
termType: p.Term_DATUM,
data: nil,
lastErr: err,
}
}
return Expr(data)
case reflect.Slice, reflect.Array:
// Check if slice is a byte slice
if valType.Elem().Kind() == reflect.Uint8 {
data, err := encode(val)
if err != nil || data == nil {
return Term{
termType: p.Term_DATUM,
data: nil,
lastErr: err,
}
}
return Expr(data)
}
vals := make([]Term, valValue.Len())
for i := 0; i < valValue.Len(); i++ {
vals[i] = Expr(valValue.Index(i).Interface())
}
return makeArray(vals)
default:
data, err := encode(val)
if err != nil || data == nil {
return Term{
termType: p.Term_DATUM,
data: nil,
lastErr: err,
}
}
return Term{
termType: p.Term_DATUM,
data: data,
}
}
}
}
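// Usage sketch (added for illustration, not part of the original driver source;
// the Post type is hypothetical):
//
//  type Post struct {
//      ID    string `gorethink:"id,omitempty"`
//      Title string `gorethink:"title"`
//  }
//
//  // Encodes to {"title": "hello"}; the empty ID field is omitted.
//  r.Expr(Post{Title: "hello"})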
// JSOpts contains the optional arguments for the JS term
type JSOpts struct {
Timeout interface{} `gorethink:"timeout,omitempty"`
}
func (o JSOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// JS creates a JavaScript expression which is evaluated by the database when
// running the query.
func JS(jssrc interface{}, optArgs ...JSOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructRootTerm("Js", p.Term_JAVASCRIPT, []interface{}{jssrc}, opts)
}
// HTTPOpts contains the optional arguments for the HTTP term
type HTTPOpts struct {
// General Options
Timeout interface{} `gorethink:"timeout,omitempty"`
Reattempts interface{} `gorethink:"reattempts,omitempty"`
Redirects interface{} `gorethink:"redirect,omitempty"`
Verify interface{} `gorethink:"verify,omitempty"`
ResultFormat interface{} `gorethink:"result_format,omitempty"`
// Request Options
Method interface{} `gorethink:"method,omitempty"`
Auth interface{} `gorethink:"auth,omitempty"`
Params interface{} `gorethink:"params,omitempty"`
Header interface{} `gorethink:"header,omitempty"`
Data interface{} `gorethink:"data,omitempty"`
// Pagination
Page interface{} `gorethink:"page,omitempty"`
PageLimit interface{} `gorethink:"page_limit,omitempty"`
}
func (o HTTPOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// HTTP retrieves data from the specified URL over HTTP. The return type depends
// on the resultFormat option, which checks the Content-Type of the response by
// default.
func HTTP(url interface{}, optArgs ...HTTPOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructRootTerm("Http", p.Term_HTTP, []interface{}{url}, opts)
}
// JSON parses a JSON string on the server.
func JSON(args ...interface{}) Term {
return constructRootTerm("Json", p.Term_JSON, args, map[string]interface{}{})
}
// Error throws a runtime error. If called with no arguments inside the second argument
// to `default`, it re-throws the current error.
func Error(args ...interface{}) Term {
return constructRootTerm("Error", p.Term_ERROR, args, map[string]interface{}{})
}
// Args is a special term used to splice an array of arguments into another term.
// This is useful when you want to call a variadic term such as GetAll with a set
// of arguments provided at runtime.
func Args(args ...interface{}) Term {
return constructRootTerm("Args", p.Term_ARGS, args, map[string]interface{}{})
}
// Binary encapsulates binary data within a query.
//
// The type of data binary accepts depends on the client language. In Go, it
// expects either a byte array/slice or a bytes.Buffer.
//
// Only a limited subset of ReQL commands may be chained after binary:
// - coerceTo can coerce binary objects to string types
// - count will return the number of bytes in the object
// - slice will treat bytes like array indexes (i.e., slice(10,20) will return bytes 10-19)
// - typeOf returns PTYPE<BINARY>
// - info will return information on a binary object.
func Binary(data interface{}) Term {
var b []byte
switch data := data.(type) {
case Term:
return constructRootTerm("Binary", p.Term_BINARY, []interface{}{data}, map[string]interface{}{})
case []byte:
b = data
default:
typ := reflect.TypeOf(data)
if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 {
return Binary(reflect.ValueOf(data).Bytes())
} else if typ.Kind() == reflect.Array && typ.Elem().Kind() == reflect.Uint8 {
v := reflect.ValueOf(data)
b = make([]byte, v.Len())
for i := 0; i < v.Len(); i++ {
b[i] = v.Index(i).Interface().(byte)
}
return Binary(b)
}
panic("Unsupported binary type")
}
return binaryTerm(base64.StdEncoding.EncodeToString(b))
}
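// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess` and the usual Table/Insert terms):
//
//  blob := []byte{0xde, 0xad, 0xbe, 0xef}
//  r.Table("files").Insert(map[string]interface{}{
//      "id":   1,
//      "data": r.Binary(blob),
//  }).RunWrite(sess)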
func binaryTerm(data string) Term {
t := constructRootTerm("Binary", p.Term_BINARY, []interface{}{}, map[string]interface{}{})
t.data = data
return t
}
// Do evaluates the expr in the context of one or more value bindings. The type of
// the result is the type of the value returned from expr.
func (t Term) Do(args ...interface{}) Term {
newArgs := []interface{}{}
newArgs = append(newArgs, funcWrap(args[len(args)-1]))
newArgs = append(newArgs, t)
newArgs = append(newArgs, args[:len(args)-1]...)
return constructRootTerm("Do", p.Term_FUNCALL, newArgs, map[string]interface{}{})
}
// Do evaluates the expr in the context of one or more value bindings. The type of
// the result is the type of the value returned from expr.
func Do(args ...interface{}) Term {
newArgs := []interface{}{}
newArgs = append(newArgs, funcWrap(args[len(args)-1]))
newArgs = append(newArgs, args[:len(args)-1]...)
return constructRootTerm("Do", p.Term_FUNCALL, newArgs, map[string]interface{}{})
}
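// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess`):
//
//  // The bound values come first and the function comes last in the Go API.
//  r.Do(10, 20, func(x, y r.Term) r.Term {
//      return x.Add(y)
//  }).Run(sess)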
// Branch evaluates one of two control paths based on the value of an expression.
// branch is effectively an if renamed due to language constraints.
//
// The type of the result is determined by the type of the branch that gets executed.
func Branch(args ...interface{}) Term {
return constructRootTerm("Branch", p.Term_BRANCH, args, map[string]interface{}{})
}
// Branch evaluates one of two control paths based on the value of an expression.
// branch is effectively an if renamed due to language constraints.
//
// The type of the result is determined by the type of the branch that gets executed.
func (t Term) Branch(args ...interface{}) Term {
return constructMethodTerm(t, "Branch", p.Term_BRANCH, args, map[string]interface{}{})
}
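// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess`):
//
//  // Evaluates to "big" because 15 > 10.
//  r.Branch(r.Expr(15).Gt(10), "big", "small").Run(sess)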
// ForEach loops over a sequence, evaluating the given write query for each element.
//
// It takes one argument of type `func (r.Term) interface{}`; for
// example, this query clones a table:
//
// r.Table("table").ForEach(func (row r.Term) interface{} {
// return r.Table("new_table").Insert(row)
// })
func (t Term) ForEach(args ...interface{}) Term {
return constructMethodTerm(t, "Foreach", p.Term_FOR_EACH, funcWrapArgs(args), map[string]interface{}{})
}
// Range generates a stream of sequential integers in a specified range. It
// accepts 0, 1, or 2 arguments, all of which should be numbers.
func Range(args ...interface{}) Term {
return constructRootTerm("Range", p.Term_RANGE, args, map[string]interface{}{})
}
// Default handles non-existence errors. Tries to evaluate and return its first argument.
// If an error related to the absence of a value is thrown in the process, or if
// its first argument returns null, returns its second argument. (Alternatively,
// the second argument may be a function which will be called with either the
// text of the non-existence error or null.)
func (t Term) Default(args ...interface{}) Term {
return constructMethodTerm(t, "Default", p.Term_DEFAULT, args, map[string]interface{}{})
}
// CoerceTo converts a value of one type into another.
//
// You can convert: a selection, sequence, or object into an ARRAY, an array of
// pairs into an OBJECT, and any DATUM into a STRING.
func (t Term) CoerceTo(args ...interface{}) Term {
return constructMethodTerm(t, "CoerceTo", p.Term_COERCE_TO, args, map[string]interface{}{})
}
// TypeOf gets the type of a value.
func TypeOf(args ...interface{}) Term {
return constructRootTerm("TypeOf", p.Term_TYPE_OF, args, map[string]interface{}{})
}
// TypeOf gets the type of a value.
func (t Term) TypeOf(args ...interface{}) Term {
return constructMethodTerm(t, "TypeOf", p.Term_TYPE_OF, args, map[string]interface{}{})
}
// ToJSON converts a ReQL value or object to a JSON string.
func (t Term) ToJSON() Term {
return constructMethodTerm(t, "ToJSON", p.Term_TO_JSON_STRING, []interface{}{}, map[string]interface{}{})
}
// Info gets information about a RQL value.
func (t Term) Info(args ...interface{}) Term {
return constructMethodTerm(t, "Info", p.Term_INFO, args, map[string]interface{}{})
}
// UUID returns a UUID (universally unique identifier), a string that can be used
// as a unique ID. If a string is passed to UUID as an argument, the UUID will be
// deterministic, derived from the string's SHA-1 hash.
func UUID(args ...interface{}) Term {
return constructRootTerm("UUID", p.Term_UUID, args, map[string]interface{}{})
}
// RawQuery creates a new query from a JSON string; this bypasses any encoding
// done by GoRethink. The query should not contain the query type or any options,
// as these should be handled using the normal driver API.
//
// This query will only work if it is the only term in the query.
func RawQuery(q []byte) Term {
data := json.RawMessage(q)
return Term{
name: "RawQuery",
rootTerm: true,
rawQuery: true,
data: &data,
args: []Term{
Term{
termType: p.Term_DATUM,
data: string(q),
},
},
}
}
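// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess`):
//
//  // A raw datum term: the JSON is sent as-is, with no driver-side encoding.
//  r.RawQuery([]byte(`"hello world"`)).Run(sess)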

View File

@ -1,25 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// DBCreate creates a database. A RethinkDB database is a collection of tables,
// similar to relational databases.
//
// Note that you can only use alphanumeric characters and underscores for the
// database name.
func DBCreate(args ...interface{}) Term {
return constructRootTerm("DBCreate", p.Term_DB_CREATE, args, map[string]interface{}{})
}
// DBDrop drops a database. The database, all its tables, and corresponding data
// will be deleted.
func DBDrop(args ...interface{}) Term {
return constructRootTerm("DBDrop", p.Term_DB_DROP, args, map[string]interface{}{})
}
// DBList lists all database names in the system.
func DBList(args ...interface{}) Term {
return constructRootTerm("DBList", p.Term_DB_LIST, args, map[string]interface{}{})
}
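// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess`):
//
//  resp, err := r.DBCreate("superheroes").RunWrite(sess)
//  // On success resp.DBsCreated is 1; err carries any server-side error.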

View File

@ -1,170 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// CircleOpts contains the optional arguments for the Circle term.
type CircleOpts struct {
NumVertices interface{} `gorethink:"num_vertices,omitempty"`
GeoSystem interface{} `gorethink:"geo_system,omitempty"`
Unit interface{} `gorethink:"unit,omitempty"`
Fill interface{} `gorethink:"fill,omitempty"`
}
func (o CircleOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Circle constructs a circular line or polygon. A circle in RethinkDB is
// a polygon or line approximating a circle of a given radius around a given
// center, consisting of a specified number of vertices (default 32).
func Circle(point, radius interface{}, optArgs ...CircleOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructRootTerm("Circle", p.Term_CIRCLE, []interface{}{point, radius}, opts)
}
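// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess`):
//
//  // A polygon approximating a 1000 metre circle around a point (units default to metres).
//  r.Circle(r.Point(-122.423246, 37.779388), 1000).Run(sess)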
// DistanceOpts contains the optional arguments for the Distance term.
type DistanceOpts struct {
GeoSystem interface{} `gorethink:"geo_system,omitempty"`
Unit interface{} `gorethink:"unit,omitempty"`
}
func (o DistanceOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Distance calculates the Haversine distance between two points. At least one
// of the geometry objects specified must be a point.
func (t Term) Distance(point interface{}, optArgs ...DistanceOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Distance", p.Term_DISTANCE, []interface{}{point}, opts)
}
// Distance calculates the Haversine distance between two points. At least one
// of the geometry objects specified must be a point.
func Distance(point1, point2 interface{}, optArgs ...DistanceOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructRootTerm("Distance", p.Term_DISTANCE, []interface{}{point1, point2}, opts)
}
// Fill converts a Line object into a Polygon object. If the last point does not
// specify the same coordinates as the first point, Fill will close the polygon
// by connecting them.
func (t Term) Fill() Term {
return constructMethodTerm(t, "Fill", p.Term_FILL, []interface{}{}, map[string]interface{}{})
}
// GeoJSON converts a GeoJSON object to a ReQL geometry object.
func GeoJSON(args ...interface{}) Term {
return constructRootTerm("GeoJSON", p.Term_GEOJSON, args, map[string]interface{}{})
}
// ToGeoJSON converts a ReQL geometry object to a GeoJSON object.
func (t Term) ToGeoJSON(args ...interface{}) Term {
return constructMethodTerm(t, "ToGeoJSON", p.Term_TO_GEOJSON, args, map[string]interface{}{})
}
// GetIntersectingOpts contains the optional arguments for the GetIntersecting term.
type GetIntersectingOpts struct {
Index interface{} `gorethink:"index,omitempty"`
}
func (o GetIntersectingOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// GetIntersecting gets all documents where the given geometry object intersects
// the geometry object of the requested geospatial index.
func (t Term) GetIntersecting(args interface{}, optArgs ...GetIntersectingOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "GetIntersecting", p.Term_GET_INTERSECTING, []interface{}{args}, opts)
}
// GetNearestOpts contains the optional arguments for the GetNearest term.
type GetNearestOpts struct {
Index interface{} `gorethink:"index,omitempty"`
MaxResults interface{} `gorethink:"max_results,omitempty"`
MaxDist interface{} `gorethink:"max_dist,omitempty"`
Unit interface{} `gorethink:"unit,omitempty"`
GeoSystem interface{} `gorethink:"geo_system,omitempty"`
}
func (o GetNearestOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// GetNearest gets all documents where the specified geospatial index is within a
// certain distance of the specified point (default 100 kilometers).
func (t Term) GetNearest(point interface{}, optArgs ...GetNearestOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "GetNearest", p.Term_GET_NEAREST, []interface{}{point}, opts)
}
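// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess` and a "places" table with a geospatial
// index named "location"):
//
//  r.Table("places").GetNearest(r.Point(-122.4, 37.7), r.GetNearestOpts{
//      Index:   "location",
//      MaxDist: 5000,
//  }).Run(sess)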
// Includes tests whether a geometry object is completely contained within another.
// When applied to a sequence of geometry objects, includes acts as a filter,
// returning a sequence of objects from the sequence that include the argument.
func (t Term) Includes(args ...interface{}) Term {
return constructMethodTerm(t, "Includes", p.Term_INCLUDES, args, map[string]interface{}{})
}
// Intersects tests whether two geometry objects intersect with one another.
// When applied to a sequence of geometry objects, intersects acts as a filter,
// returning a sequence of objects from the sequence that intersect with the
// argument.
func (t Term) Intersects(args ...interface{}) Term {
return constructMethodTerm(t, "Intersects", p.Term_INTERSECTS, args, map[string]interface{}{})
}
// Line constructs a geometry object of type Line. The line can be specified in
// one of two ways:
// - Two or more two-item arrays, specifying longitude and latitude numbers of
// the line's vertices;
// - Two or more Point objects specifying the line's vertices.
func Line(args ...interface{}) Term {
return constructRootTerm("Line", p.Term_LINE, args, map[string]interface{}{})
}
// Point constructs a geometry object of type Point. The point is specified by
// two floating point numbers, the longitude (-180 to 180) and latitude
// (-90 to 90) of the point on a perfect sphere.
func Point(lon, lat interface{}) Term {
return constructRootTerm("Point", p.Term_POINT, []interface{}{lon, lat}, map[string]interface{}{})
}
// Polygon constructs a geometry object of type Polygon. The Polygon can be
// specified in one of two ways:
// - Three or more two-item arrays, specifying longitude and latitude numbers of the polygon's vertices;
// - Three or more Point objects specifying the polygon's vertices.
func Polygon(args ...interface{}) Term {
return constructRootTerm("Polygon", p.Term_POLYGON, args, map[string]interface{}{})
}
// PolygonSub "punches a hole" out of the parent polygon using the polygon passed
// to the function.
// polygon1.PolygonSub(polygon2) -> polygon
// In the example above, polygon2 must be completely contained within polygon1
// and must have no holes itself (it must not be the output of polygon_sub itself).
func (t Term) PolygonSub(args ...interface{}) Term {
return constructMethodTerm(t, "PolygonSub", p.Term_POLYGON_SUB, args, map[string]interface{}{})
}

View File

@ -1,47 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// InnerJoin returns the inner product of two sequences (e.g. a table, a filter result)
// filtered by the predicate. The query compares each row of the left sequence
// with each row of the right sequence to find all pairs of rows which satisfy
// the predicate. When the predicate is satisfied, each matched pair of rows
// of both sequences are combined into a result row.
func (t Term) InnerJoin(args ...interface{}) Term {
return constructMethodTerm(t, "InnerJoin", p.Term_INNER_JOIN, args, map[string]interface{}{})
}
// OuterJoin computes a left outer join by retaining each row in the left table even
// if no match was found in the right table.
func (t Term) OuterJoin(args ...interface{}) Term {
return constructMethodTerm(t, "OuterJoin", p.Term_OUTER_JOIN, args, map[string]interface{}{})
}
// EqJoinOpts contains the optional arguments for the EqJoin term.
type EqJoinOpts struct {
Index interface{} `gorethink:"index,omitempty"`
Ordered interface{} `gorethink:"ordered,omitempty"`
}
func (o EqJoinOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// EqJoin is an efficient join that looks up elements in the right table by primary key.
//
// Optional arguments: "index" (string - name of the index to use in right table instead of the primary key)
func (t Term) EqJoin(left, right interface{}, optArgs ...EqJoinOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "EqJoin", p.Term_EQ_JOIN, []interface{}{funcWrap(left), right}, opts)
}
// Zip is used to 'zip' up the result of a join by merging the 'right' fields into 'left'
// fields of each member of the sequence.
func (t Term) Zip(args ...interface{}) Term {
return constructMethodTerm(t, "Zip", p.Term_ZIP, args, map[string]interface{}{})
}
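// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess` and "posts"/"users" tables where each post
// stores its author's primary key in "author_id"):
//
//  r.Table("posts").EqJoin("author_id", r.Table("users")).Zip().Run(sess)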

View File

@ -1,121 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// Row returns the currently visited document. Note that Row does not work within
// subqueries to access nested documents; you should use anonymous functions to
// access those documents instead. Also note that, unlike in other drivers, to
// access a row's fields you should call Field. For example,
// r.row("fieldname") in other drivers becomes r.Row.Field("fieldname")
var Row = constructRootTerm("Doc", p.Term_IMPLICIT_VAR, []interface{}{}, map[string]interface{}{})
// Literal replaces an object in a field instead of merging it with an existing
// object in a merge or update operation.
func Literal(args ...interface{}) Term {
return constructRootTerm("Literal", p.Term_LITERAL, args, map[string]interface{}{})
}
// Field gets a single field from an object. If called on a sequence, gets that field
// from every object in the sequence, skipping objects that lack it.
func (t Term) Field(args ...interface{}) Term {
return constructMethodTerm(t, "Field", p.Term_GET_FIELD, args, map[string]interface{}{})
}
// HasFields tests if an object has all of the specified fields. An object has a field if
// it has the specified key and that key maps to a non-null value. For instance,
// the object `{'a':1,'b':2,'c':null}` has the fields `a` and `b`.
func (t Term) HasFields(args ...interface{}) Term {
return constructMethodTerm(t, "HasFields", p.Term_HAS_FIELDS, args, map[string]interface{}{})
}
// Pluck plucks out one or more attributes from either an object or a sequence of
// objects (projection).
func (t Term) Pluck(args ...interface{}) Term {
return constructMethodTerm(t, "Pluck", p.Term_PLUCK, args, map[string]interface{}{})
}
// Without is the opposite of pluck; takes an object or a sequence of objects, and returns
// them with the specified paths removed.
func (t Term) Without(args ...interface{}) Term {
return constructMethodTerm(t, "Without", p.Term_WITHOUT, args, map[string]interface{}{})
}
// Merge merges two objects together to construct a new object with properties from both.
// Gives preference to attributes from other when there is a conflict.
func (t Term) Merge(args ...interface{}) Term {
return constructMethodTerm(t, "Merge", p.Term_MERGE, funcWrapArgs(args), map[string]interface{}{})
}
// Append appends a value to an array.
func (t Term) Append(args ...interface{}) Term {
return constructMethodTerm(t, "Append", p.Term_APPEND, args, map[string]interface{}{})
}
// Prepend prepends a value to an array.
func (t Term) Prepend(args ...interface{}) Term {
return constructMethodTerm(t, "Prepend", p.Term_PREPEND, args, map[string]interface{}{})
}
// Difference removes the elements of one array from another array.
func (t Term) Difference(args ...interface{}) Term {
return constructMethodTerm(t, "Difference", p.Term_DIFFERENCE, args, map[string]interface{}{})
}
// SetInsert adds a value to an array and returns it as a set (an array with distinct values).
func (t Term) SetInsert(args ...interface{}) Term {
return constructMethodTerm(t, "SetInsert", p.Term_SET_INSERT, args, map[string]interface{}{})
}
// SetUnion adds several values to an array and returns it as a set (an array with
// distinct values).
func (t Term) SetUnion(args ...interface{}) Term {
return constructMethodTerm(t, "SetUnion", p.Term_SET_UNION, args, map[string]interface{}{})
}
// SetIntersection calculates the intersection of two arrays returning values that
// occur in both of them as a set (an array with distinct values).
func (t Term) SetIntersection(args ...interface{}) Term {
return constructMethodTerm(t, "SetIntersection", p.Term_SET_INTERSECTION, args, map[string]interface{}{})
}
// SetDifference removes the elements of one array from another and returns them as a set (an
// array with distinct values).
func (t Term) SetDifference(args ...interface{}) Term {
return constructMethodTerm(t, "SetDifference", p.Term_SET_DIFFERENCE, args, map[string]interface{}{})
}
// InsertAt inserts a value into an array at a given index. Returns the modified array.
func (t Term) InsertAt(args ...interface{}) Term {
return constructMethodTerm(t, "InsertAt", p.Term_INSERT_AT, args, map[string]interface{}{})
}
// SpliceAt inserts several values into an array at a given index. Returns the modified array.
func (t Term) SpliceAt(args ...interface{}) Term {
return constructMethodTerm(t, "SpliceAt", p.Term_SPLICE_AT, args, map[string]interface{}{})
}
// DeleteAt removes an element from an array at a given index. Returns the modified array.
func (t Term) DeleteAt(args ...interface{}) Term {
return constructMethodTerm(t, "DeleteAt", p.Term_DELETE_AT, args, map[string]interface{}{})
}
// ChangeAt changes a value in an array at a given index. Returns the modified array.
func (t Term) ChangeAt(args ...interface{}) Term {
return constructMethodTerm(t, "ChangeAt", p.Term_CHANGE_AT, args, map[string]interface{}{})
}
// Keys returns an array containing all of the object's keys.
func (t Term) Keys(args ...interface{}) Term {
return constructMethodTerm(t, "Keys", p.Term_KEYS, args, map[string]interface{}{})
}
// Values returns an array containing all of the object's values.
func (t Term) Values(args ...interface{}) Term {
return constructMethodTerm(t, "Values", p.Term_VALUES, args, map[string]interface{}{})
}
// Object creates an object from a list of key-value pairs, where the keys must be strings.
func Object(args ...interface{}) Term {
return constructRootTerm("Object", p.Term_OBJECT, args, map[string]interface{}{})
}
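// Usage sketch (added for illustration, not part of the original driver source;
// assumes a connected session `sess`):
//
//  // Builds the object {"id": 1, "name": "bob"}.
//  r.Object("id", 1, "name", "bob").Run(sess)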

View File

@ -1,229 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
var (
// MinVal represents the smallest possible value RethinkDB can store
MinVal = constructRootTerm("MinVal", p.Term_MINVAL, []interface{}{}, map[string]interface{}{})
// MaxVal represents the largest possible value RethinkDB can store
MaxVal = constructRootTerm("MaxVal", p.Term_MAXVAL, []interface{}{}, map[string]interface{}{})
)
// Add sums two numbers or concatenates two arrays.
func (t Term) Add(args ...interface{}) Term {
return constructMethodTerm(t, "Add", p.Term_ADD, args, map[string]interface{}{})
}
// Add sums two numbers or concatenates two arrays.
func Add(args ...interface{}) Term {
return constructRootTerm("Add", p.Term_ADD, args, map[string]interface{}{})
}
// Sub subtracts two numbers.
func (t Term) Sub(args ...interface{}) Term {
return constructMethodTerm(t, "Sub", p.Term_SUB, args, map[string]interface{}{})
}
// Sub subtracts two numbers.
func Sub(args ...interface{}) Term {
return constructRootTerm("Sub", p.Term_SUB, args, map[string]interface{}{})
}
// Mul multiplies two numbers.
func (t Term) Mul(args ...interface{}) Term {
return constructMethodTerm(t, "Mul", p.Term_MUL, args, map[string]interface{}{})
}
// Mul multiplies two numbers.
func Mul(args ...interface{}) Term {
return constructRootTerm("Mul", p.Term_MUL, args, map[string]interface{}{})
}
// Div divides two numbers.
func (t Term) Div(args ...interface{}) Term {
return constructMethodTerm(t, "Div", p.Term_DIV, args, map[string]interface{}{})
}
// Div divides two numbers.
func Div(args ...interface{}) Term {
return constructRootTerm("Div", p.Term_DIV, args, map[string]interface{}{})
}
// Mod divides two numbers and returns the remainder.
func (t Term) Mod(args ...interface{}) Term {
return constructMethodTerm(t, "Mod", p.Term_MOD, args, map[string]interface{}{})
}
// Mod divides two numbers and returns the remainder.
func Mod(args ...interface{}) Term {
return constructRootTerm("Mod", p.Term_MOD, args, map[string]interface{}{})
}
// And performs a logical and on two values.
func (t Term) And(args ...interface{}) Term {
return constructMethodTerm(t, "And", p.Term_AND, args, map[string]interface{}{})
}
// And performs a logical and on two values.
func And(args ...interface{}) Term {
return constructRootTerm("And", p.Term_AND, args, map[string]interface{}{})
}
// Or performs a logical or on two values.
func (t Term) Or(args ...interface{}) Term {
return constructMethodTerm(t, "Or", p.Term_OR, args, map[string]interface{}{})
}
// Or performs a logical or on two values.
func Or(args ...interface{}) Term {
return constructRootTerm("Or", p.Term_OR, args, map[string]interface{}{})
}
// Eq returns true if two values are equal.
func (t Term) Eq(args ...interface{}) Term {
return constructMethodTerm(t, "Eq", p.Term_EQ, args, map[string]interface{}{})
}
// Eq returns true if two values are equal.
func Eq(args ...interface{}) Term {
return constructRootTerm("Eq", p.Term_EQ, args, map[string]interface{}{})
}
// Ne returns true if two values are not equal.
func (t Term) Ne(args ...interface{}) Term {
return constructMethodTerm(t, "Ne", p.Term_NE, args, map[string]interface{}{})
}
// Ne returns true if two values are not equal.
func Ne(args ...interface{}) Term {
return constructRootTerm("Ne", p.Term_NE, args, map[string]interface{}{})
}
// Gt returns true if the first value is greater than the second.
func (t Term) Gt(args ...interface{}) Term {
return constructMethodTerm(t, "Gt", p.Term_GT, args, map[string]interface{}{})
}
// Gt returns true if the first value is greater than the second.
func Gt(args ...interface{}) Term {
return constructRootTerm("Gt", p.Term_GT, args, map[string]interface{}{})
}
// Ge returns true if the first value is greater than or equal to the second.
func (t Term) Ge(args ...interface{}) Term {
return constructMethodTerm(t, "Ge", p.Term_GE, args, map[string]interface{}{})
}
// Ge returns true if the first value is greater than or equal to the second.
func Ge(args ...interface{}) Term {
return constructRootTerm("Ge", p.Term_GE, args, map[string]interface{}{})
}
// Lt returns true if the first value is less than the second.
func (t Term) Lt(args ...interface{}) Term {
return constructMethodTerm(t, "Lt", p.Term_LT, args, map[string]interface{}{})
}
// Lt returns true if the first value is less than the second.
func Lt(args ...interface{}) Term {
return constructRootTerm("Lt", p.Term_LT, args, map[string]interface{}{})
}
// Le returns true if the first value is less than or equal to the second.
func (t Term) Le(args ...interface{}) Term {
return constructMethodTerm(t, "Le", p.Term_LE, args, map[string]interface{}{})
}
// Le returns true if the first value is less than or equal to the second.
func Le(args ...interface{}) Term {
return constructRootTerm("Le", p.Term_LE, args, map[string]interface{}{})
}
// Not performs a logical not on a value.
func (t Term) Not(args ...interface{}) Term {
return constructMethodTerm(t, "Not", p.Term_NOT, args, map[string]interface{}{})
}
// Not performs a logical not on a value.
func Not(args ...interface{}) Term {
return constructRootTerm("Not", p.Term_NOT, args, map[string]interface{}{})
}
// RandomOpts contains the optional arguments for the Random term.
type RandomOpts struct {
Float interface{} `gorethink:"float,omitempty"`
}
func (o RandomOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Random generates a random number between given (or implied) bounds. Random
// takes zero, one or two arguments.
//
// With zero arguments, the result will be a floating-point number in the range
// [0,1).
//
// With one argument x, the result will be in the range [0,x), and will be an
// integer unless the Float option is set to true. Specifying a floating point
// number without the Float option will raise an error.
//
// With two arguments x and y, the result will be in the range [x,y), and will
// be an integer unless the Float option is set to true. If x and y are equal an
// error will occur, unless the floating-point option has been specified, in
// which case x will be returned. Specifying a floating point number without the
// float option will raise an error.
//
// Note: Any integer responses can be coerced to floating-point values when
// unmarshaling to a Go floating-point type. The last argument given will always
// be the open side of the range, but when generating a floating-point
// number, the open side may be less than the closed side.
func Random(args ...interface{}) Term {
var opts = map[string]interface{}{}
// Look for options map
if len(args) > 0 {
if possibleOpts, ok := args[len(args)-1].(RandomOpts); ok {
opts = possibleOpts.toMap()
args = args[:len(args)-1]
}
}
return constructRootTerm("Random", p.Term_RANDOM, args, opts)
}
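// Illustrative sketch, not part of the vendored source: the three calling
// forms of Random described above. The bounds are arbitrary example values.
func exampleRandomUsage() (Term, Term, Term) {
    unitFloat := Random()                             // float in [0,1)
    dieRoll := Random(1, 7)                           // integer in [1,7)
    scaled := Random(0, 100, RandomOpts{Float: true}) // float in [0,100)
    return unitFloat, dieRoll, scaled
}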
// Round rounds the given value to the nearest whole integer.
func (t Term) Round(args ...interface{}) Term {
return constructMethodTerm(t, "Round", p.Term_ROUND, args, map[string]interface{}{})
}
// Round rounds the given value to the nearest whole integer.
func Round(args ...interface{}) Term {
return constructRootTerm("Round", p.Term_ROUND, args, map[string]interface{}{})
}
// Ceil rounds the given value up, returning the smallest integer value greater
// than or equal to the given value (the value's ceiling).
func (t Term) Ceil(args ...interface{}) Term {
return constructMethodTerm(t, "Ceil", p.Term_CEIL, args, map[string]interface{}{})
}
// Ceil rounds the given value up, returning the smallest integer value greater
// than or equal to the given value (the value's ceiling).
func Ceil(args ...interface{}) Term {
return constructRootTerm("Ceil", p.Term_CEIL, args, map[string]interface{}{})
}
// Floor rounds the given value down, returning the largest integer value less
// than or equal to the given value (the value's floor).
func (t Term) Floor(args ...interface{}) Term {
return constructMethodTerm(t, "Floor", p.Term_FLOOR, args, map[string]interface{}{})
}
// Floor rounds the given value down, returning the largest integer value less
// than or equal to the given value (the value's floor).
func Floor(args ...interface{}) Term {
return constructRootTerm("Floor", p.Term_FLOOR, args, map[string]interface{}{})
}


@ -1,141 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// DB references a database.
func DB(args ...interface{}) Term {
return constructRootTerm("DB", p.Term_DB, args, map[string]interface{}{})
}
// TableOpts contains the optional arguments for the Table term
type TableOpts struct {
ReadMode interface{} `gorethink:"read_mode,omitempty"`
UseOutdated interface{} `gorethink:"use_outdated,omitempty"` // Deprecated
IdentifierFormat interface{} `gorethink:"identifier_format,omitempty"`
}
func (o TableOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Table selects all documents in a table. This command can be chained with
// other commands to do further processing on the data.
//
// There are two optional arguments.
// - useOutdated: if true, this allows potentially out-of-date data to be
// returned, with potentially faster reads. It also allows you to perform reads
// from a secondary replica if a primary has failed. Default false.
// - identifierFormat: possible values are name and uuid, with a default of name.
// If set to uuid, then system tables will refer to servers, databases and tables
// by UUID rather than name. (This only has an effect when used with system tables.)
func Table(name interface{}, optArgs ...TableOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructRootTerm("Table", p.Term_TABLE, []interface{}{name}, opts)
}
// Table selects all documents in a table. This command can be chained with
// other commands to do further processing on the data.
//
// There are two optional arguments.
// - useOutdated: if true, this allows potentially out-of-date data to be
// returned, with potentially faster reads. It also allows you to perform reads
// from a secondary replica if a primary has failed. Default false.
// - identifierFormat: possible values are name and uuid, with a default of name.
// If set to uuid, then system tables will refer to servers, databases and tables
// by UUID rather than name. (This only has an effect when used with system tables.)
func (t Term) Table(name interface{}, optArgs ...TableOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Table", p.Term_TABLE, []interface{}{name}, opts)
}
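// Illustrative sketch, not part of the vendored source: selecting a table
// with and without options. The database and table names are arbitrary
// examples; "rethinkdb"/"stats" refer to the built-in system tables.
func exampleTableUsage() (Term, Term) {
    // Select all documents from the "users" table in the "test" database.
    users := DB("test").Table("users")
    // Inspect a system table, referring to other objects by UUID.
    stats := DB("rethinkdb").Table("stats", TableOpts{IdentifierFormat: "uuid"})
    return users, stats
}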
// Get gets a document by primary key. If nothing was found, RethinkDB will return a nil value.
func (t Term) Get(args ...interface{}) Term {
return constructMethodTerm(t, "Get", p.Term_GET, args, map[string]interface{}{})
}
// GetAllOpts contains the optional arguments for the GetAll term
type GetAllOpts struct {
Index interface{} `gorethink:"index,omitempty"`
}
func (o GetAllOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// GetAll gets all documents where the given value matches the value of the primary
// index. Multiple values can be passed this function if you want to select multiple
// documents. If the documents you are fetching have composite keys then each
// argument should be a slice. For more information see the examples.
func (t Term) GetAll(keys ...interface{}) Term {
return constructMethodTerm(t, "GetAll", p.Term_GET_ALL, keys, map[string]interface{}{})
}
// GetAllByIndex gets all documents where the given value matches the value of
// the requested index.
func (t Term) GetAllByIndex(index interface{}, keys ...interface{}) Term {
return constructMethodTerm(t, "GetAll", p.Term_GET_ALL, keys, map[string]interface{}{"index": index})
}
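// Illustrative sketch, not part of the vendored source: fetching documents
// by primary key and by a secondary index. Table, key and index names are
// arbitrary example values.
func exampleGetAllUsage() (Term, Term) {
    // Multiple primary keys in one call.
    byID := Table("posts").GetAll("id1", "id2")
    // The same lookup against a secondary index named "author"; for a
    // compound index each key would be passed as a slice instead.
    byAuthor := Table("posts").GetAllByIndex("author", "alice")
    return byID, byAuthor
}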
// BetweenOpts contains the optional arguments for the Between term
type BetweenOpts struct {
Index interface{} `gorethink:"index,omitempty"`
LeftBound interface{} `gorethink:"left_bound,omitempty"`
RightBound interface{} `gorethink:"right_bound,omitempty"`
}
func (o BetweenOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Between gets all documents between two keys. Accepts three optional arguments:
// index, leftBound, and rightBound. If index is set to the name of a secondary
// index, between will return all documents where that index's value is in the
// specified range (it uses the primary key by default). leftBound or rightBound
// may be set to open or closed to indicate whether or not to include that endpoint
// of the range (by default, leftBound is closed and rightBound is open).
//
// You may also use the special constants r.minval and r.maxval for boundaries,
// which represent “less than any index key” and “more than any index key”
// respectively. For instance, if you use r.minval as the lower key, then between
// will return all documents whose primary keys (or indexes) are less than the
// specified upper key.
func (t Term) Between(lowerKey, upperKey interface{}, optArgs ...BetweenOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Between", p.Term_BETWEEN, []interface{}{lowerKey, upperKey}, opts)
}
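// Illustrative sketch, not part of the vendored source: range queries with
// Between, including the MinVal bound mentioned above. Table and index names
// are arbitrary example values.
func exampleBetweenUsage() (Term, Term) {
    // Primary-key range [10, 20) with the default bounds (left closed, right open).
    byPK := Table("posts").Between(10, 20)
    // Everything up to and including a date on the secondary index "date".
    byDate := Table("posts").Between(MinVal, "2017-10-06",
        BetweenOpts{Index: "date", RightBound: "closed"})
    return byPK, byDate
}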
// FilterOpts contains the optional arguments for the Filter term
type FilterOpts struct {
Default interface{} `gorethink:"default,omitempty"`
}
func (o FilterOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Filter gets all the documents for which the given predicate is true.
//
// Filter can be called on a sequence, selection, or a field containing an array
// of elements. The return type is the same as the type the function was
// called on. The body of every filter is wrapped in an implicit `.default(false)`,
// and the default value can be changed by passing the optional argument `default`.
// Setting this optional argument to `r.error()` will cause any non-existence
// errors to abort the filter.
func (t Term) Filter(f interface{}, optArgs ...FilterOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Filter", p.Term_FILTER, []interface{}{funcWrap(f)}, opts)
}
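// Illustrative sketch, not part of the vendored source: filtering with an
// anonymous function. Table and field names are arbitrary examples; rows
// missing the "age" field fall back to the implicit .default(false) above.
func exampleFilterUsage() Term {
    return Table("users").Filter(func(row Term) Term {
        return row.AtIndex("age").Ge(18)
    })
}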


@ -1,44 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// Match matches against a regular expression. If no match is found, returns
// null. If there is a match then an object with the following fields is
// returned:
// str: The matched string
// start: The matched string's start
// end: The matched string's end
// groups: The capture groups defined with parentheses
//
// Accepts RE2 syntax (https://code.google.com/p/re2/wiki/Syntax). You can
// enable case-insensitive matching by prefixing the regular expression with
// (?i). See the linked RE2 documentation for more flags.
//
// The match command does not support backreferences.
func (t Term) Match(args ...interface{}) Term {
return constructMethodTerm(t, "Match", p.Term_MATCH, args, map[string]interface{}{})
}
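// Illustrative sketch, not part of the vendored source: a case-insensitive
// regular-expression match used as a filter predicate. Table and field names
// are arbitrary examples; rows where Match returns null are filtered out.
func exampleMatchUsage() Term {
    return Table("users").Filter(func(row Term) Term {
        return row.AtIndex("email").Match(`(?i)@example\.com$`)
    })
}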
// Split splits a string into substrings. Splits on whitespace when called with no arguments.
// When called with a separator, splits on that separator. When called with a separator
// and a maximum number of splits, splits on that separator at most max_splits times.
// (Can be called with null as the separator if you want to split on whitespace while still
// specifying max_splits.)
//
// Mimics the behavior of Python's string.split in edge cases, except for splitting on the
// empty string, which instead produces an array of single-character strings.
func (t Term) Split(args ...interface{}) Term {
return constructMethodTerm(t, "Split", p.Term_SPLIT, funcWrapArgs(args), map[string]interface{}{})
}
// Upcase upper-cases a string.
func (t Term) Upcase(args ...interface{}) Term {
return constructMethodTerm(t, "Upcase", p.Term_UPCASE, args, map[string]interface{}{})
}
// Downcase lower-cases a string.
func (t Term) Downcase(args ...interface{}) Term {
return constructMethodTerm(t, "Downcase", p.Term_DOWNCASE, args, map[string]interface{}{})
}


@ -1,173 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// TableCreateOpts contains the optional arguments for the TableCreate term
type TableCreateOpts struct {
PrimaryKey interface{} `gorethink:"primary_key,omitempty"`
Durability interface{} `gorethink:"durability,omitempty"`
Shards interface{} `gorethink:"shards,omitempty"`
Replicas interface{} `gorethink:"replicas,omitempty"`
PrimaryReplicaTag interface{} `gorethink:"primary_replica_tag,omitempty"`
NonVotingReplicaTags interface{} `gorethink:"nonvoting_replica_tags,omitempty"`
}
func (o TableCreateOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// TableCreate creates a table. A RethinkDB table is a collection of JSON
// documents.
//
// Note: Only alphanumeric characters and underscores are valid for the table name.
func TableCreate(name interface{}, optArgs ...TableCreateOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructRootTerm("TableCreate", p.Term_TABLE_CREATE, []interface{}{name}, opts)
}
// TableCreate creates a table. A RethinkDB table is a collection of JSON
// documents.
//
// Note: Only alphanumeric characters and underscores are valid for the table name.
func (t Term) TableCreate(name interface{}, optArgs ...TableCreateOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "TableCreate", p.Term_TABLE_CREATE, []interface{}{name}, opts)
}
// TableDrop deletes a table. The table and all its data will be deleted.
func TableDrop(args ...interface{}) Term {
return constructRootTerm("TableDrop", p.Term_TABLE_DROP, args, map[string]interface{}{})
}
// TableDrop deletes a table. The table and all its data will be deleted.
func (t Term) TableDrop(args ...interface{}) Term {
return constructMethodTerm(t, "TableDrop", p.Term_TABLE_DROP, args, map[string]interface{}{})
}
// TableList lists all table names in a database.
func TableList(args ...interface{}) Term {
return constructRootTerm("TableList", p.Term_TABLE_LIST, args, map[string]interface{}{})
}
// TableList lists all table names in a database.
func (t Term) TableList(args ...interface{}) Term {
return constructMethodTerm(t, "TableList", p.Term_TABLE_LIST, args, map[string]interface{}{})
}
// IndexCreateOpts contains the optional arguments for the IndexCreate term
type IndexCreateOpts struct {
Multi interface{} `gorethink:"multi,omitempty"`
Geo interface{} `gorethink:"geo,omitempty"`
}
func (o IndexCreateOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// IndexCreate creates a new secondary index on a table. Secondary indexes
// improve the speed of many read queries at the slight cost of increased
// storage space and decreased write performance.
//
// IndexCreate supports the creation of the following types of indexes, to create
// indexes using arbitrary expressions use IndexCreateFunc.
// - Simple indexes based on the value of a single field.
// - Geospatial indexes based on indexes of geometry objects, created when the
// geo optional argument is true.
func (t Term) IndexCreate(name interface{}, optArgs ...IndexCreateOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "IndexCreate", p.Term_INDEX_CREATE, []interface{}{name}, opts)
}
// IndexCreateFunc creates a new secondary index on a table. Secondary indexes
// improve the speed of many read queries at the slight cost of increased
// storage space and decreased write performance. The function takes an index
// name and an RQL term as the index value; the term can be an anonymous function
// or a binary representation obtained from the function field of indexStatus.
//
// It supports the creation of the following types of indexes.
// - Simple indexes based on the value of a single field where the index has a
// different name to the field.
// - Compound indexes based on multiple fields.
// - Multi indexes based on arrays of values, created when the multi optional argument is true.
func (t Term) IndexCreateFunc(name, indexFunction interface{}, optArgs ...IndexCreateOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "IndexCreate", p.Term_INDEX_CREATE, []interface{}{name, funcWrap(indexFunction)}, opts)
}
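// Illustrative sketch, not part of the vendored source: the index variants
// described above. Table, field and index names are arbitrary examples.
func exampleIndexCreateUsage() (Term, Term, Term) {
    // Simple index on the "email" field.
    simple := Table("users").IndexCreate("email")
    // Compound index built from two fields via an anonymous function.
    compound := Table("users").IndexCreateFunc("name_and_city", func(row Term) interface{} {
        return []interface{}{row.AtIndex("name"), row.AtIndex("city")}
    })
    // Multi index over an array-valued "tags" field.
    multi := Table("posts").IndexCreate("tags", IndexCreateOpts{Multi: true})
    return simple, compound, multi
}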
// IndexDrop deletes a previously created secondary index of a table.
func (t Term) IndexDrop(args ...interface{}) Term {
return constructMethodTerm(t, "IndexDrop", p.Term_INDEX_DROP, args, map[string]interface{}{})
}
// IndexList lists all the secondary indexes of a table.
func (t Term) IndexList(args ...interface{}) Term {
return constructMethodTerm(t, "IndexList", p.Term_INDEX_LIST, args, map[string]interface{}{})
}
// IndexRenameOpts contains the optional arguments for the IndexRename term
type IndexRenameOpts struct {
Overwrite interface{} `gorethink:"overwrite,omitempty"`
}
func (o IndexRenameOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// IndexRename renames an existing secondary index on a table.
func (t Term) IndexRename(oldName, newName interface{}, optArgs ...IndexRenameOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "IndexRename", p.Term_INDEX_RENAME, []interface{}{oldName, newName}, opts)
}
// IndexStatus gets the status of the specified indexes on this table, or the
// status of all indexes on this table if no indexes are specified.
func (t Term) IndexStatus(args ...interface{}) Term {
return constructMethodTerm(t, "IndexStatus", p.Term_INDEX_STATUS, args, map[string]interface{}{})
}
// IndexWait waits for the specified indexes on this table to be ready, or for
// all indexes on this table to be ready if no indexes are specified.
func (t Term) IndexWait(args ...interface{}) Term {
return constructMethodTerm(t, "IndexWait", p.Term_INDEX_WAIT, args, map[string]interface{}{})
}
// ChangesOpts contains the optional arguments for the Changes term
type ChangesOpts struct {
Squash interface{} `gorethink:"squash,omitempty"`
IncludeInitial interface{} `gorethink:"include_initial,omitempty"`
IncludeStates interface{} `gorethink:"include_states,omitempty"`
IncludeOffsets interface{} `gorethink:"include_offsets,omitempty"`
IncludeTypes interface{} `gorethink:"include_types,omitempty"`
ChangefeedQueueSize interface{} `gorethink:"changefeed_queue_size,omitempty"`
}
func (o ChangesOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Changes returns an infinite stream of objects representing changes to a query.
func (t Term) Changes(optArgs ...ChangesOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Changes", p.Term_CHANGES, []interface{}{}, opts)
}
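// Illustrative sketch, not part of the vendored source: opening a changefeed
// that also emits the table's initial contents and tags each change with its
// type. The table name is an arbitrary example value.
func exampleChangesUsage() Term {
    return Table("messages").Changes(ChangesOpts{
        IncludeInitial: true,
        IncludeTypes:   true,
    })
}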


@ -1,187 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// Now returns a time object representing the current time in UTC
func Now(args ...interface{}) Term {
return constructRootTerm("Now", p.Term_NOW, args, map[string]interface{}{})
}
// Time creates a time object for a specific time
func Time(args ...interface{}) Term {
return constructRootTerm("Time", p.Term_TIME, args, map[string]interface{}{})
}
// EpochTime returns a time object based on seconds since epoch
func EpochTime(args ...interface{}) Term {
return constructRootTerm("EpochTime", p.Term_EPOCH_TIME, args, map[string]interface{}{})
}
// ISO8601Opts contains the optional arguments for the ISO8601 term
type ISO8601Opts struct {
DefaultTimezone interface{} `gorethink:"default_timezone,omitempty"`
}
func (o ISO8601Opts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// ISO8601 returns a time object based on an ISO8601 formatted date-time string
func ISO8601(date interface{}, optArgs ...ISO8601Opts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructRootTerm("ISO8601", p.Term_ISO8601, []interface{}{date}, opts)
}
// InTimezone returns a new time object with a different time zone. While the
// time stays the same, the results returned by methods such as hours() will
// change since they take the timezone into account. The timezone argument
// has to be of the ISO 8601 format.
func (t Term) InTimezone(args ...interface{}) Term {
return constructMethodTerm(t, "InTimezone", p.Term_IN_TIMEZONE, args, map[string]interface{}{})
}
// Timezone returns the timezone of the time object
func (t Term) Timezone(args ...interface{}) Term {
return constructMethodTerm(t, "Timezone", p.Term_TIMEZONE, args, map[string]interface{}{})
}
// DuringOpts contains the optional arguments for the During term
type DuringOpts struct {
LeftBound interface{} `gorethink:"left_bound,omitempty"`
RightBound interface{} `gorethink:"right_bound,omitempty"`
}
func (o DuringOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// During returns true if a time is between two other times
// (by default, inclusive for the start, exclusive for the end).
func (t Term) During(startTime, endTime interface{}, optArgs ...DuringOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "During", p.Term_DURING, []interface{}{startTime, endTime}, opts)
}
// Date returns a new time object only based on the day, month and year
// (ie. the same day at 00:00).
func (t Term) Date(args ...interface{}) Term {
return constructMethodTerm(t, "Date", p.Term_DATE, args, map[string]interface{}{})
}
// TimeOfDay returns the number of seconds elapsed since the beginning of the
// day stored in the time object.
func (t Term) TimeOfDay(args ...interface{}) Term {
return constructMethodTerm(t, "TimeOfDay", p.Term_TIME_OF_DAY, args, map[string]interface{}{})
}
// Year returns the year of a time object.
func (t Term) Year(args ...interface{}) Term {
return constructMethodTerm(t, "Year", p.Term_YEAR, args, map[string]interface{}{})
}
// Month returns the month of a time object as a number between 1 and 12.
// For your convenience, the terms r.January(), r.February() etc. are
// defined and map to the appropriate integer.
func (t Term) Month(args ...interface{}) Term {
return constructMethodTerm(t, "Month", p.Term_MONTH, args, map[string]interface{}{})
}
// Day returns the day of a time object as a number between 1 and 31.
func (t Term) Day(args ...interface{}) Term {
return constructMethodTerm(t, "Day", p.Term_DAY, args, map[string]interface{}{})
}
// DayOfWeek returns the day of week of a time object as a number between
// 1 and 7 (following ISO 8601 standard). For your convenience,
// the terms r.Monday(), r.Tuesday() etc. are defined and map to
// the appropriate integer.
func (t Term) DayOfWeek(args ...interface{}) Term {
return constructMethodTerm(t, "DayOfWeek", p.Term_DAY_OF_WEEK, args, map[string]interface{}{})
}
// DayOfYear returns the day of the year of a time object as a number between
// 1 and 366 (following ISO 8601 standard).
func (t Term) DayOfYear(args ...interface{}) Term {
return constructMethodTerm(t, "DayOfYear", p.Term_DAY_OF_YEAR, args, map[string]interface{}{})
}
// Hours returns the hour in a time object as a number between 0 and 23.
func (t Term) Hours(args ...interface{}) Term {
return constructMethodTerm(t, "Hours", p.Term_HOURS, args, map[string]interface{}{})
}
// Minutes returns the minute in a time object as a number between 0 and 59.
func (t Term) Minutes(args ...interface{}) Term {
return constructMethodTerm(t, "Minutes", p.Term_MINUTES, args, map[string]interface{}{})
}
// Seconds returns the seconds in a time object as a number between 0 and
// 59.999 (double precision).
func (t Term) Seconds(args ...interface{}) Term {
return constructMethodTerm(t, "Seconds", p.Term_SECONDS, args, map[string]interface{}{})
}
// ToISO8601 converts a time object to its ISO 8601 format.
func (t Term) ToISO8601(args ...interface{}) Term {
return constructMethodTerm(t, "ToISO8601", p.Term_TO_ISO8601, args, map[string]interface{}{})
}
// ToEpochTime converts a time object to its epoch time.
func (t Term) ToEpochTime(args ...interface{}) Term {
return constructMethodTerm(t, "ToEpochTime", p.Term_TO_EPOCH_TIME, args, map[string]interface{}{})
}
var (
// Days
// Monday is a constant representing the day of the week Monday
Monday = constructRootTerm("Monday", p.Term_MONDAY, []interface{}{}, map[string]interface{}{})
// Tuesday is a constant representing the day of the week Tuesday
Tuesday = constructRootTerm("Tuesday", p.Term_TUESDAY, []interface{}{}, map[string]interface{}{})
// Wednesday is a constant representing the day of the week Wednesday
Wednesday = constructRootTerm("Wednesday", p.Term_WEDNESDAY, []interface{}{}, map[string]interface{}{})
// Thursday is a constant representing the day of the week Thursday
Thursday = constructRootTerm("Thursday", p.Term_THURSDAY, []interface{}{}, map[string]interface{}{})
// Friday is a constant representing the day of the week Friday
Friday = constructRootTerm("Friday", p.Term_FRIDAY, []interface{}{}, map[string]interface{}{})
// Saturday is a constant representing the day of the week Saturday
Saturday = constructRootTerm("Saturday", p.Term_SATURDAY, []interface{}{}, map[string]interface{}{})
// Sunday is a constant representing the day of the week Sunday
Sunday = constructRootTerm("Sunday", p.Term_SUNDAY, []interface{}{}, map[string]interface{}{})
// Months
// January is a constant representing the month January
January = constructRootTerm("January", p.Term_JANUARY, []interface{}{}, map[string]interface{}{})
// February is a constant representing the month February
February = constructRootTerm("February", p.Term_FEBRUARY, []interface{}{}, map[string]interface{}{})
// March is a constant representing the month March
March = constructRootTerm("March", p.Term_MARCH, []interface{}{}, map[string]interface{}{})
// April is a constant representing the month April
April = constructRootTerm("April", p.Term_APRIL, []interface{}{}, map[string]interface{}{})
// May is a constant representing the month May
May = constructRootTerm("May", p.Term_MAY, []interface{}{}, map[string]interface{}{})
// June is a constant representing the month June
June = constructRootTerm("June", p.Term_JUNE, []interface{}{}, map[string]interface{}{})
// July is a constant representing the month July
July = constructRootTerm("July", p.Term_JULY, []interface{}{}, map[string]interface{}{})
// August is a constant representing the month August
August = constructRootTerm("August", p.Term_AUGUST, []interface{}{}, map[string]interface{}{})
// September is a constant representing the month September
September = constructRootTerm("September", p.Term_SEPTEMBER, []interface{}{}, map[string]interface{}{})
// October is a constant representing the month October
October = constructRootTerm("October", p.Term_OCTOBER, []interface{}{}, map[string]interface{}{})
// November is a constant representing the month November
November = constructRootTerm("November", p.Term_NOVEMBER, []interface{}{}, map[string]interface{}{})
// December is a constant representing the month December
December = constructRootTerm("December", p.Term_DECEMBER, []interface{}{}, map[string]interface{}{})
)


@ -1,193 +0,0 @@
package gorethink
import p "gopkg.in/gorethink/gorethink.v2/ql2"
// Map transforms each element of the sequence by applying the given mapping
// function. It takes two arguments, a sequence and a function of type
// `func (r.Term) interface{}`.
//
// For example this query doubles each element in an array:
//
// r.Map([]int{1,3,6}, func (row r.Term) interface{} {
// return row.Mul(2)
// })
func Map(args ...interface{}) Term {
if len(args) > 0 {
args = append(args[:len(args)-1], funcWrap(args[len(args)-1]))
}
return constructRootTerm("Map", p.Term_MAP, args, map[string]interface{}{})
}
// Map transforms each element of the sequence by applying the given mapping
// function. It takes one argument of type `func (r.Term) interface{}`.
//
// For example this query doubles each element in an array:
//
// r.Expr([]int{1,3,6}).Map(func (row r.Term) interface{} {
// return row.Mul(2)
// })
func (t Term) Map(args ...interface{}) Term {
if len(args) > 0 {
args = append(args[:len(args)-1], funcWrap(args[len(args)-1]))
}
return constructMethodTerm(t, "Map", p.Term_MAP, args, map[string]interface{}{})
}
// WithFields takes a sequence of objects and a list of fields. If any objects in the
// sequence don't have all of the specified fields, they're dropped from the
// sequence. The remaining objects have the specified fields plucked out.
// (This is identical to `HasFields` followed by `Pluck` on a sequence.)
func (t Term) WithFields(args ...interface{}) Term {
return constructMethodTerm(t, "WithFields", p.Term_WITH_FIELDS, args, map[string]interface{}{})
}
// ConcatMap concatenates one or more elements into a single sequence using a
// mapping function. ConcatMap works in a similar fashion to Map, applying the
// given function to each element in a sequence, but it will always return a
// single sequence.
func (t Term) ConcatMap(args ...interface{}) Term {
return constructMethodTerm(t, "ConcatMap", p.Term_CONCAT_MAP, funcWrapArgs(args), map[string]interface{}{})
}
// OrderByOpts contains the optional arguments for the OrderBy term
type OrderByOpts struct {
Index interface{} `gorethink:"index,omitempty"`
}
func (o OrderByOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// OrderBy sorts the sequence by document values of the given key(s). To specify
// the ordering, wrap the attribute with either r.Asc or r.Desc (defaults to
// ascending).
//
// Sorting without an index requires the server to hold the sequence in memory,
// and is limited to 100,000 documents (or the setting of the ArrayLimit option
// for run). Sorting with an index can be done on arbitrarily large tables, or
// after a between command using the same index.
func (t Term) OrderBy(args ...interface{}) Term {
var opts = map[string]interface{}{}
// Look for options map
if len(args) > 0 {
if possibleOpts, ok := args[len(args)-1].(OrderByOpts); ok {
opts = possibleOpts.toMap()
args = args[:len(args)-1]
}
}
for k, arg := range args {
if t, ok := arg.(Term); !(ok && (t.termType == p.Term_DESC || t.termType == p.Term_ASC)) {
args[k] = funcWrap(arg)
}
}
return constructMethodTerm(t, "OrderBy", p.Term_ORDER_BY, args, opts)
}
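// Illustrative sketch, not part of the vendored source: ordering with and
// without an index. Table, field and index names are arbitrary examples.
func exampleOrderByUsage() (Term, Term) {
    // In-memory sort (subject to the array limit described above).
    byAge := Table("users").OrderBy(Desc("age"), Asc("name"))
    // Index-backed sort, suitable for arbitrarily large tables.
    byCreated := Table("users").OrderBy(OrderByOpts{Index: Desc("created_at")})
    return byAge, byCreated
}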
// Desc is used by the OrderBy term to specify the ordering to be descending.
func Desc(args ...interface{}) Term {
return constructRootTerm("Desc", p.Term_DESC, funcWrapArgs(args), map[string]interface{}{})
}
// Asc is used by the OrderBy term to specify that the ordering be ascending (the
// default).
func Asc(args ...interface{}) Term {
return constructRootTerm("Asc", p.Term_ASC, funcWrapArgs(args), map[string]interface{}{})
}
// Skip skips a number of elements from the head of the sequence.
func (t Term) Skip(args ...interface{}) Term {
return constructMethodTerm(t, "Skip", p.Term_SKIP, args, map[string]interface{}{})
}
// Limit ends the sequence after the given number of elements.
func (t Term) Limit(args ...interface{}) Term {
return constructMethodTerm(t, "Limit", p.Term_LIMIT, args, map[string]interface{}{})
}
// SliceOpts contains the optional arguments for the Slice term
type SliceOpts struct {
LeftBound interface{} `gorethink:"left_bound,omitempty"`
RightBound interface{} `gorethink:"right_bound,omitempty"`
}
func (o SliceOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Slice trims the sequence to within the bounds provided.
func (t Term) Slice(args ...interface{}) Term {
var opts = map[string]interface{}{}
// Look for options map
if len(args) > 0 {
if possibleOpts, ok := args[len(args)-1].(SliceOpts); ok {
opts = possibleOpts.toMap()
args = args[:len(args)-1]
}
}
return constructMethodTerm(t, "Slice", p.Term_SLICE, args, opts)
}
// AtIndex gets a single field from an object or the nth element from a sequence.
func (t Term) AtIndex(args ...interface{}) Term {
return constructMethodTerm(t, "AtIndex", p.Term_BRACKET, args, map[string]interface{}{})
}
// Nth gets the nth element from a sequence.
func (t Term) Nth(args ...interface{}) Term {
return constructMethodTerm(t, "Nth", p.Term_NTH, args, map[string]interface{}{})
}
// OffsetsOf gets the indexes of an element in a sequence. If the argument is a
// predicate, get the indexes of all elements matching it.
func (t Term) OffsetsOf(args ...interface{}) Term {
return constructMethodTerm(t, "OffsetsOf", p.Term_OFFSETS_OF, funcWrapArgs(args), map[string]interface{}{})
}
// IsEmpty tests if a sequence is empty.
func (t Term) IsEmpty(args ...interface{}) Term {
return constructMethodTerm(t, "IsEmpty", p.Term_IS_EMPTY, args, map[string]interface{}{})
}
// UnionOpts contains the optional arguments for the Union term
type UnionOpts struct {
Interleave interface{} `gorethink:"interleave,omitempty"`
}
func (o UnionOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Union concatenates two sequences.
func Union(args ...interface{}) Term {
return constructRootTerm("Union", p.Term_UNION, args, map[string]interface{}{})
}
// Union concatenates two sequences.
func (t Term) Union(args ...interface{}) Term {
return constructMethodTerm(t, "Union", p.Term_UNION, args, map[string]interface{}{})
}
// UnionWithOpts, like Union, concatenates two sequences but additionally allows
// optional arguments to be passed.
func UnionWithOpts(optArgs UnionOpts, args ...interface{}) Term {
return constructRootTerm("Union", p.Term_UNION, args, optArgs.toMap())
}
// UnionWithOpts, like Union, concatenates two sequences but additionally allows
// optional arguments to be passed.
func (t Term) UnionWithOpts(optArgs UnionOpts, args ...interface{}) Term {
return constructMethodTerm(t, "Union", p.Term_UNION, args, optArgs.toMap())
}
// Sample selects a given number of elements from a sequence with uniform random
// distribution. Selection is done without replacement.
func (t Term) Sample(args ...interface{}) Term {
return constructMethodTerm(t, "Sample", p.Term_SAMPLE, args, map[string]interface{}{})
}


@ -1,98 +0,0 @@
package gorethink
import (
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// InsertOpts contains the optional arguments for the Insert term
type InsertOpts struct {
Durability interface{} `gorethink:"durability,omitempty"`
ReturnChanges interface{} `gorethink:"return_changes,omitempty"`
Conflict interface{} `gorethink:"conflict,omitempty"`
}
func (o InsertOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Insert documents into a table. Accepts a single document or an array
// of documents.
func (t Term) Insert(arg interface{}, optArgs ...InsertOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Insert", p.Term_INSERT, []interface{}{Expr(arg)}, opts)
}
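// Illustrative sketch, not part of the vendored source: inserting a document
// and upserting on primary-key conflicts. The table name and document fields
// are arbitrary example values.
func exampleInsertUsage() Term {
    doc := map[string]interface{}{"id": "alice", "visits": 1}
    return Table("users").Insert(doc, InsertOpts{
        Conflict:      "update", // overwrite matching fields on conflict
        ReturnChanges: true,
    })
}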
// UpdateOpts contains the optional arguments for the Update term
type UpdateOpts struct {
Durability interface{} `gorethink:"durability,omitempty"`
ReturnChanges interface{} `gorethink:"return_changes,omitempty"`
NonAtomic interface{} `gorethink:"non_atomic,omitempty"`
Conflict interface{} `gorethink:"conflict,omitempty"`
}
func (o UpdateOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Update JSON documents in a table. Accepts a JSON document, a ReQL expression,
// or a combination of the two. You can pass options like returnChanges that will
// return the old and new values of the row you have modified.
func (t Term) Update(arg interface{}, optArgs ...UpdateOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Update", p.Term_UPDATE, []interface{}{funcWrap(arg)}, opts)
}
// ReplaceOpts contains the optional arguments for the Replace term
type ReplaceOpts struct {
Durability interface{} `gorethink:"durability,omitempty"`
ReturnChanges interface{} `gorethink:"return_changes,omitempty"`
NonAtomic interface{} `gorethink:"non_atomic,omitempty"`
}
func (o ReplaceOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Replace documents in a table. Accepts a JSON document or a ReQL expression,
// and replaces the original document with the new one. The new document must
// have the same primary key as the original document.
func (t Term) Replace(arg interface{}, optArgs ...ReplaceOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Replace", p.Term_REPLACE, []interface{}{funcWrap(arg)}, opts)
}
// DeleteOpts contains the optional arguments for the Delete term
type DeleteOpts struct {
Durability interface{} `gorethink:"durability,omitempty"`
ReturnChanges interface{} `gorethink:"return_changes,omitempty"`
}
func (o DeleteOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Delete one or more documents from a table.
func (t Term) Delete(optArgs ...DeleteOpts) Term {
opts := map[string]interface{}{}
if len(optArgs) >= 1 {
opts = optArgs[0].toMap()
}
return constructMethodTerm(t, "Delete", p.Term_DELETE, []interface{}{}, opts)
}
// Sync ensures that writes on a given table are written to permanent storage.
// Queries that specify soft durability do not give such guarantees, so Sync
// can be used to ensure the state of these queries. A call to Sync does not
// return until all previous writes to the table are persisted.
func (t Term) Sync(args ...interface{}) Term {
return constructMethodTerm(t, "Sync", p.Term_SYNC, args, map[string]interface{}{})
}


@ -1,328 +0,0 @@
package gorethink
import (
"crypto/tls"
"sync"
"time"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// A Session represents a connection to a RethinkDB cluster and should be used
// when executing queries.
type Session struct {
hosts []Host
opts *ConnectOpts
mu sync.RWMutex
cluster *Cluster
closed bool
}
// ConnectOpts is used to specify optional arguments when connecting to a cluster.
type ConnectOpts struct {
// Address holds the address of the server initially used when creating the
// session. Only used if Addresses is empty
Address string `gorethink:"address,omitempty"`
// Addresses holds the addresses of the servers initially used when creating
// the session.
Addresses []string `gorethink:"addresses,omitempty"`
// Database is the default database name used when executing queries, this
// value is only used if the query does not contain any DB term
Database string `gorethink:"database,omitempty"`
// Username holds the username used for authentication, if blank (and the v1
// handshake protocol is being used) then the admin user is used
Username string `gorethink:"username,omitempty"`
// Password holds the password used for authentication (only used when using
// the v1 handshake protocol)
Password string `gorethink:"password,omitempty"`
// AuthKey is used for authentication when using the v0.4 handshake protocol
// This field is now deprecated
AuthKey string `gorethink:"authkey,omitempty"`
// Timeout is the time the driver waits when creating new connections, to
// configure the timeout used when executing queries use WriteTimeout and
// ReadTimeout
Timeout time.Duration `gorethink:"timeout,omitempty"`
// WriteTimeout is the amount of time the driver will wait when sending the
// query to the server
WriteTimeout time.Duration `gorethink:"write_timeout,omitempty"`
// ReadTimeout is the amount of time the driver will wait for a response from
// the server when executing queries.
ReadTimeout time.Duration `gorethink:"read_timeout,omitempty"`
// KeepAlivePeriod is the keep alive period used by the connection, by default
// this is 30s. It is not possible to disable keep alive messages
KeepAlivePeriod time.Duration `gorethink:"keep_alive_timeout,omitempty"`
// TLSConfig holds the TLS configuration and can be used when connecting
// to a RethinkDB server protected by SSL
TLSConfig *tls.Config `gorethink:"tlsconfig,omitempty"`
// HandshakeVersion is used to specify which handshake version should be
// used, this currently defaults to v1 which is used by RethinkDB 2.3 and
// later. If you are using an older version then you can set the handshake
// version to 0.4
HandshakeVersion HandshakeVersion `gorethink:"handshake_version,omitempty"`
// UseJSONNumber indicates whether the cursors running in this session should
// use json.Number instead of float64 while unmarshaling documents with
// interface{}. The default is `false`.
UseJSONNumber bool
// NumRetries is the number of times a query is retried if a connection
// error is detected, queries are not retried if RethinkDB returns a
// runtime error.
NumRetries int
// InitialCap is used by the internal connection pool and is used to
// configure how many connections are created for each host when the
// session is created. If zero then no connections are created until
// the first query is executed.
InitialCap int `gorethink:"initial_cap,omitempty"`
// MaxOpen is used by the internal connection pool and is used to configure
// the maximum number of connections held in the pool. If all available
// connections are being used then the driver will open new connections as
// needed however they will not be returned to the pool. By default the
// maximum number of connections is 2
MaxOpen int `gorethink:"max_open,omitempty"`
// The options below are for cluster discovery; please note there is a high
// probability of these changing as the API is still being worked on.
// DiscoverHosts is used to enable host discovery, when true the driver
// will attempt to discover any new nodes added to the cluster and then
// start sending queries to these new nodes.
DiscoverHosts bool `gorethink:"discover_hosts,omitempty"`
// HostDecayDuration is used by the go-hostpool package to calculate a weighted
// score when selecting a host. By default a value of 5 minutes is used.
HostDecayDuration time.Duration
// Deprecated: This function is no longer used due to changes in the
// way hosts are selected.
NodeRefreshInterval time.Duration `gorethink:"node_refresh_interval,omitempty"`
// Deprecated: Use InitialCap instead
MaxIdle int `gorethink:"max_idle,omitempty"`
}
func (o ConnectOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// Connect creates a new database session. To view the available connection
// options see ConnectOpts.
//
// By default maxIdle and maxOpen are set to 1: passing values greater
// than the default (e.g. MaxIdle: 10, MaxOpen: 20) will provide a
// pool of reusable connections.
//
// Basic connection example:
//
// session, err := r.Connect(r.ConnectOpts{
// Address: "localhost:28015",
// Database: "test",
// AuthKey: "14daak1cad13dj",
// })
//
// Cluster connection example:
//
// session, err := r.Connect(r.ConnectOpts{
// Addresses: []string{"localhost:28015", "localhost:28016"},
// Database: "test",
// AuthKey: "14daak1cad13dj",
// })
func Connect(opts ConnectOpts) (*Session, error) {
var addresses = opts.Addresses
if len(addresses) == 0 {
addresses = []string{opts.Address}
}
hosts := make([]Host, len(addresses))
for i, address := range addresses {
hostname, port := splitAddress(address)
hosts[i] = NewHost(hostname, port)
}
if len(hosts) <= 0 {
return nil, ErrNoHosts
}
// Connect
s := &Session{
hosts: hosts,
opts: &opts,
}
err := s.Reconnect()
if err != nil {
// note: s.Reconnect() will initialize cluster information which
// will cause the .IsConnected() method to be caught in a loop
return &Session{
hosts: hosts,
opts: &opts,
}, err
}
return s, nil
}
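// Illustrative sketch, not part of the vendored source: connecting to a
// single node and shutting the session down again. The address and database
// are arbitrary example values; error handling is abbreviated.
func exampleConnectUsage() error {
    sess, err := Connect(ConnectOpts{
        Address:  "localhost:28015",
        Database: "test",
        MaxOpen:  10,
    })
    if err != nil {
        return err
    }
    // Wait for outstanding noreply writes before closing.
    return sess.Close(CloseOpts{NoReplyWait: true})
}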
// CloseOpts allows calls to the Close function to be configured.
type CloseOpts struct {
NoReplyWait bool `gorethink:"noreplyWait,omitempty"`
}
func (o CloseOpts) toMap() map[string]interface{} {
return optArgsToMap(o)
}
// IsConnected returns true if session has a valid connection.
func (s *Session) IsConnected() bool {
s.mu.Lock()
defer s.mu.Unlock()
if s.cluster == nil || s.closed {
return false
}
return s.cluster.IsConnected()
}
// Reconnect closes and re-opens a session.
func (s *Session) Reconnect(optArgs ...CloseOpts) error {
var err error
if err = s.Close(optArgs...); err != nil {
return err
}
s.mu.Lock()
s.cluster, err = NewCluster(s.hosts, s.opts)
if err != nil {
s.mu.Unlock()
return err
}
s.closed = false
s.mu.Unlock()
return nil
}
// Close closes the session
func (s *Session) Close(optArgs ...CloseOpts) error {
s.mu.Lock()
defer s.mu.Unlock()
if s.closed {
return nil
}
if len(optArgs) >= 1 {
if optArgs[0].NoReplyWait {
s.mu.Unlock()
s.NoReplyWait()
s.mu.Lock()
}
}
if s.cluster != nil {
s.cluster.Close()
}
s.cluster = nil
s.closed = true
return nil
}
// SetInitialPoolCap sets the initial capacity of the connection pool.
func (s *Session) SetInitialPoolCap(n int) {
s.mu.Lock()
defer s.mu.Unlock()
s.opts.InitialCap = n
s.cluster.SetInitialPoolCap(n)
}
// SetMaxIdleConns sets the maximum number of connections in the idle
// connection pool.
func (s *Session) SetMaxIdleConns(n int) {
s.mu.Lock()
defer s.mu.Unlock()
s.opts.MaxIdle = n
s.cluster.SetMaxIdleConns(n)
}
// SetMaxOpenConns sets the maximum number of open connections to the database.
func (s *Session) SetMaxOpenConns(n int) {
s.mu.Lock()
defer s.mu.Unlock()
s.opts.MaxOpen = n
s.cluster.SetMaxOpenConns(n)
}
// NoReplyWait ensures that previous queries with the noreply flag have been
// processed by the server. Note that this guarantee only applies to queries
// run on the given connection.
func (s *Session) NoReplyWait() error {
s.mu.RLock()
defer s.mu.RUnlock()
if s.closed {
return ErrConnectionClosed
}
return s.cluster.Exec(Query{
Type: p.Query_NOREPLY_WAIT,
})
}
// Use changes the default database used
func (s *Session) Use(database string) {
s.mu.Lock()
defer s.mu.Unlock()
s.opts.Database = database
}
// Database returns the selected database set by Use
func (s *Session) Database() string {
s.mu.RLock()
defer s.mu.RUnlock()
return s.opts.Database
}
// Query executes a ReQL query using the session to connect to the database
func (s *Session) Query(q Query) (*Cursor, error) {
s.mu.RLock()
defer s.mu.RUnlock()
if s.closed {
return nil, ErrConnectionClosed
}
return s.cluster.Query(q)
}
// Exec executes a ReQL query using the session to connect to the database
func (s *Session) Exec(q Query) error {
s.mu.RLock()
defer s.mu.RUnlock()
if s.closed {
return ErrConnectionClosed
}
return s.cluster.Exec(q)
}
// Server returns the server name and server UUID being used by a connection.
func (s *Session) Server() (ServerResponse, error) {
return s.cluster.Server()
}
// SetHosts resets the hosts used when connecting to the RethinkDB cluster
func (s *Session) SetHosts(hosts []Host) {
s.mu.Lock()
defer s.mu.Unlock()
s.hosts = hosts
}
func (s *Session) newQuery(t Term, opts map[string]interface{}) (Query, error) {
return newQuery(t, opts, s.opts)
}


@ -1,283 +0,0 @@
package gorethink
import (
"reflect"
"strconv"
"strings"
"sync/atomic"
"gopkg.in/gorethink/gorethink.v2/encoding"
p "gopkg.in/gorethink/gorethink.v2/ql2"
)
// Helper functions for constructing terms
// constructRootTerm is an alias for creating a new term.
func constructRootTerm(name string, termType p.Term_TermType, args []interface{}, optArgs map[string]interface{}) Term {
return Term{
name: name,
rootTerm: true,
termType: termType,
args: convertTermList(args),
optArgs: convertTermObj(optArgs),
}
}
// constructMethodTerm is an alias for creating a new term. Unlike constructRootTerm
// this function adds the previous expression in the tree to the argument list to
// create a method term.
func constructMethodTerm(prevVal Term, name string, termType p.Term_TermType, args []interface{}, optArgs map[string]interface{}) Term {
args = append([]interface{}{prevVal}, args...)
return Term{
name: name,
rootTerm: false,
termType: termType,
args: convertTermList(args),
optArgs: convertTermObj(optArgs),
}
}
// Helper functions for creating internal RQL types
func newQuery(t Term, qopts map[string]interface{}, copts *ConnectOpts) (q Query, err error) {
queryOpts := map[string]interface{}{}
for k, v := range qopts {
queryOpts[k], err = Expr(v).Build()
if err != nil {
return
}
}
if copts.Database != "" {
queryOpts["db"], err = DB(copts.Database).Build()
if err != nil {
return
}
}
builtTerm, err := t.Build()
if err != nil {
return q, err
}
// Construct query
return Query{
Type: p.Query_START,
Term: &t,
Opts: queryOpts,
builtTerm: builtTerm,
}, nil
}
// makeArray takes a slice of terms and produces a single MAKE_ARRAY term
func makeArray(args termsList) Term {
return Term{
name: "[...]",
termType: p.Term_MAKE_ARRAY,
args: args,
}
}
// makeObject takes a map of terms and produces a single MAKE_OBJECT term
func makeObject(args termsObj) Term {
return Term{
name: "{...}",
termType: p.Term_MAKE_OBJ,
optArgs: args,
}
}
var nextVarID int64
func makeFunc(f interface{}) Term {
value := reflect.ValueOf(f)
valueType := value.Type()
var argNums = make([]interface{}, valueType.NumIn())
var args = make([]reflect.Value, valueType.NumIn())
for i := 0; i < valueType.NumIn(); i++ {
// Get a slice of the VARs to use as the function arguments
varID := atomic.AddInt64(&nextVarID, 1)
args[i] = reflect.ValueOf(constructRootTerm("var", p.Term_VAR, []interface{}{varID}, map[string]interface{}{}))
argNums[i] = varID
// make sure all input arguments are of type Term
argValueTypeName := valueType.In(i).String()
if argValueTypeName != "gorethink.Term" && argValueTypeName != "interface {}" {
panic("Function argument is not of type Term or interface {}")
}
}
if valueType.NumOut() != 1 {
panic("Function does not have a single return value")
}
body := value.Call(args)[0].Interface()
argsArr := makeArray(convertTermList(argNums))
return constructRootTerm("func", p.Term_FUNC, []interface{}{argsArr, body}, map[string]interface{}{})
}
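// Illustrative sketch, not part of the vendored source: what makeFunc builds
// for a caller-supplied Go function. The argument is bound to a fresh VAR
// term and the body is evaluated once, client-side, to produce the FUNC term
// that is sent to the server.
func exampleMakeFuncUsage() Term {
    // Roughly FUNC([n], ADD(VAR(n), 10)) in ReQL wire terms, where n is a
    // fresh variable id taken from nextVarID and differs at runtime.
    return makeFunc(func(x Term) Term {
        return x.Add(10)
    })
}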
func funcWrap(value interface{}) Term {
val := Expr(value)
if implVarScan(val) && val.termType != p.Term_ARGS {
return makeFunc(func(x Term) Term {
return val
})
}
return val
}
func funcWrapArgs(args []interface{}) []interface{} {
for i, arg := range args {
args[i] = funcWrap(arg)
}
return args
}
// implVarScan recursively checks a value to see if it contains an
// IMPLICIT_VAR term. If it does, it returns true.
func implVarScan(value Term) bool {
if value.termType == p.Term_IMPLICIT_VAR {
return true
}
for _, v := range value.args {
if implVarScan(v) {
return true
}
}
for _, v := range value.optArgs {
if implVarScan(v) {
return true
}
}
return false
}
// Convert an opt args struct to a map.
func optArgsToMap(optArgs OptArgs) map[string]interface{} {
data, err := encode(optArgs)
if err == nil && data != nil {
if m, ok := data.(map[string]interface{}); ok {
return m
}
}
return map[string]interface{}{}
}
// Convert a list into a slice of terms
func convertTermList(l []interface{}) termsList {
if len(l) == 0 {
return nil
}
terms := make(termsList, len(l))
for i, v := range l {
terms[i] = Expr(v)
}
return terms
}
// Convert a map into a map of terms
func convertTermObj(o map[string]interface{}) termsObj {
if len(o) == 0 {
return nil
}
terms := make(termsObj, len(o))
for k, v := range o {
terms[k] = Expr(v)
}
return terms
}
// Helper functions for debugging
func allArgsToStringSlice(args termsList, optArgs termsObj) []string {
allArgs := make([]string, len(args)+len(optArgs))
i := 0
for _, v := range args {
allArgs[i] = v.String()
i++
}
for k, v := range optArgs {
allArgs[i] = k + "=" + v.String()
i++
}
return allArgs
}
func argsToStringSlice(args termsList) []string {
allArgs := make([]string, len(args))
for i, v := range args {
allArgs[i] = v.String()
}
return allArgs
}
func optArgsToStringSlice(optArgs termsObj) []string {
allArgs := make([]string, len(optArgs))
i := 0
for k, v := range optArgs {
allArgs[i] = k + "=" + v.String()
i++
}
return allArgs
}
func splitAddress(address string) (hostname string, port int) {
hostname = "localhost"
port = 28015
addrParts := strings.Split(address, ":")
if len(addrParts) >= 1 {
hostname = addrParts[0]
}
if len(addrParts) >= 2 {
port, _ = strconv.Atoi(addrParts[1])
}
return
}
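// Illustrative sketch, not part of the vendored source: how splitAddress
// falls back to the default port when none is given. The hostnames are
// arbitrary example values.
func exampleSplitAddressUsage() {
    h1, p1 := splitAddress("db1.example.com:29015") // "db1.example.com", 29015
    h2, p2 := splitAddress("db1.example.com")       // "db1.example.com", 28015
    _, _, _, _ = h1, p1, h2, p2
}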
func encode(data interface{}) (interface{}, error) {
if _, ok := data.(Term); ok {
return data, nil
}
v, err := encoding.Encode(data)
if err != nil {
return nil, err
}
return v, nil
}
// shouldRetryQuery checks the result of a query and returns true if the query
// should be retried
func shouldRetryQuery(q Query, err error) bool {
if err == nil {
return false
}
if _, ok := err.(RQLConnectionError); ok {
return true
}
return err == ErrConnectionClosed
}


@ -1,64 +0,0 @@
package logrus
// The following code was sourced and modified from the
// https://bitbucket.org/tebeka/atexit package governed by the following license:
//
// Copyright (c) 2012 Miki Tebeka <miki.tebeka@gmail.com>.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of
// this software and associated documentation files (the "Software"), to deal in
// the Software without restriction, including without limitation the rights to
// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
// the Software, and to permit persons to whom the Software is furnished to do so,
// subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import (
"fmt"
"os"
)
var handlers = []func(){}
func runHandler(handler func()) {
defer func() {
if err := recover(); err != nil {
fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
}
}()
handler()
}
func runHandlers() {
for _, handler := range handlers {
runHandler(handler)
}
}
// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
func Exit(code int) {
runHandlers()
os.Exit(code)
}
// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
// all handlers. The handlers will also be invoked when any Fatal log entry is
// made.
//
// This method is useful when a caller wishes to use logrus to log a fatal
// message but also needs to gracefully shut down. An example use case could be
// closing database connections, or sending an alert that the application is
// closing.
func RegisterExitHandler(handler func()) {
handlers = append(handlers, handler)
}


@ -1,26 +0,0 @@
/*
Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
log "github.com/Sirupsen/logrus"
)
func main() {
log.WithFields(log.Fields{
"animal": "walrus",
"number": 1,
"size": 10,
}).Info("A walrus appears")
}
Output:
time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
For a full guide visit https://github.com/Sirupsen/logrus
*/
package logrus


@ -1,275 +0,0 @@
package logrus
import (
"bytes"
"fmt"
"os"
"sync"
"time"
)
var bufferPool *sync.Pool
func init() {
bufferPool = &sync.Pool{
New: func() interface{} {
return new(bytes.Buffer)
},
}
}
// Defines the key when adding errors using WithError.
var ErrorKey = "error"
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
Logger *Logger
// Contains all the fields set by the user.
Data Fields
// Time at which the log entry was created
Time time.Time
// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
Level Level
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
// When formatter is called in entry.log(), a Buffer may be set to the entry
Buffer *bytes.Buffer
}
func NewEntry(logger *Logger) *Entry {
return &Entry{
Logger: logger,
// Default is three fields, give a little extra room
Data: make(Fields, 5),
}
}
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
return "", err
}
str := string(serialized)
return str, nil
}
// Add an error as single field (using the key defined in ErrorKey) to the Entry.
func (entry *Entry) WithError(err error) *Entry {
return entry.WithField(ErrorKey, err)
}
// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
return entry.WithFields(Fields{key: value})
}
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
data[k] = v
}
for k, v := range fields {
data[k] = v
}
return &Entry{Logger: entry.Logger, Data: data}
}
// This function is not declared with a pointer value because otherwise
// race conditions will occur when using multiple goroutines
func (entry Entry) log(level Level, msg string) {
var buffer *bytes.Buffer
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
buffer = bufferPool.Get().(*bytes.Buffer)
buffer.Reset()
defer bufferPool.Put(buffer)
entry.Buffer = buffer
serialized, err := entry.Logger.Formatter.Format(&entry)
entry.Buffer = nil
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
} else {
entry.Logger.mu.Lock()
_, err = entry.Logger.Out.Write(serialized)
if err != nil {
fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
}
entry.Logger.mu.Unlock()
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
panic(&entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Print(args ...interface{}) {
entry.Info(args...)
}
func (entry *Entry) Info(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warning(args ...interface{}) {
entry.Warn(args...)
}
func (entry *Entry) Error(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
}
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Printf(format string, args ...interface{}) {
entry.Infof(format, args...)
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Warningf(format string, args ...interface{}) {
entry.Warnf(format, args...)
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
if entry.Logger.Level >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
if entry.Logger.Level >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
func (entry *Entry) Println(args ...interface{}) {
entry.Infoln(args...)
}
func (entry *Entry) Warnln(args ...interface{}) {
if entry.Logger.Level >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
func (entry *Entry) Warningln(args ...interface{}) {
entry.Warnln(args...)
}
func (entry *Entry) Errorln(args ...interface{}) {
if entry.Logger.Level >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
if entry.Logger.Level >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
if entry.Logger.Level >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
// Sprintlnn => Sprint no newline. This is to get the behavior of how
// fmt.Sprintln where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
msg := fmt.Sprintln(args...)
return msg[:len(msg)-1]
}


@ -1,193 +0,0 @@
package logrus
import (
"io"
)
var (
// std is the name of the standard logger in stdlib `log`
std = New()
)
func StandardLogger() *Logger {
return std
}
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
defer std.mu.Unlock()
std.Out = out
}
// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
std.mu.Lock()
defer std.mu.Unlock()
std.Formatter = formatter
}
// SetLevel sets the standard logger level.
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
std.Level = level
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
std.mu.Lock()
defer std.mu.Unlock()
return std.Level
}
// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
std.mu.Lock()
defer std.mu.Unlock()
std.Hooks.Add(hook)
}
// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
func WithError(err error) *Entry {
return std.WithField(ErrorKey, err)
}
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
return std.WithField(key, value)
}
// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
return std.WithFields(fields)
}
// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
std.Debug(args...)
}
// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
std.Print(args...)
}
// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
std.Info(args...)
}
// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
std.Warn(args...)
}
// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
std.Warning(args...)
}
// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
std.Error(args...)
}
// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
std.Panic(args...)
}
// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
std.Fatal(args...)
}
// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
std.Debugf(format, args...)
}
// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
std.Printf(format, args...)
}
// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
std.Infof(format, args...)
}
// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
std.Warnf(format, args...)
}
// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
std.Warningf(format, args...)
}
// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
std.Errorf(format, args...)
}
// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
std.Panicf(format, args...)
}
// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
std.Fatalf(format, args...)
}
// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
std.Debugln(args...)
}
// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
std.Println(args...)
}
// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
std.Infoln(args...)
}
// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
std.Warnln(args...)
}
// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
std.Warningln(args...)
}
// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
std.Errorln(args...)
}
// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
std.Panicln(args...)
}
// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
std.Fatalln(args...)
}


@ -1,45 +0,0 @@
package logrus
import "time"
const DefaultTimestampFormat = time.RFC3339
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
Format(*Entry) ([]byte, error)
}
// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there doing:
//
// logrus.WithField("level", 1).Info("hello")
//
// Would just silently drop the user-provided level. Instead, with this code
// it'll be logged as:
//
// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
if t, ok := data["time"]; ok {
data["fields.time"] = t
}
if m, ok := data["msg"]; ok {
data["fields.msg"] = m
}
if l, ok := data["level"]; ok {
data["fields.level"] = l
}
}


@ -1,34 +0,0 @@
package logrus
// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers; you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
Levels() []Level
Fire(*Entry) error
}
// Internal type for storing the hooks on a logger instance.
type LevelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks LevelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
}
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
}
}
return nil
}


@ -1,74 +0,0 @@
package logrus
import (
"encoding/json"
"fmt"
)
type fieldKey string
type FieldMap map[fieldKey]string
const (
FieldKeyMsg = "msg"
FieldKeyLevel = "level"
FieldKeyTime = "time"
)
func (f FieldMap) resolve(key fieldKey) string {
if k, ok := f[key]; ok {
return k
}
return string(key)
}
type JSONFormatter struct {
// TimestampFormat sets the format used for marshaling timestamps.
TimestampFormat string
// DisableTimestamp allows disabling automatic timestamps in output
DisableTimestamp bool
// FieldMap allows users to customize the names of keys for various fields.
// As an example:
// formatter := &JSONFormatter{
// FieldMap: FieldMap{
// FieldKeyTime: "@timestamp",
// FieldKeyLevel: "@level",
// FieldKeyLevel: "@message",
// },
// }
FieldMap FieldMap
}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
switch v := v.(type) {
case error:
// Otherwise errors are ignored by `encoding/json`
// https://github.com/Sirupsen/logrus/issues/137
data[k] = v.Error()
default:
data[k] = v
}
}
prefixFieldClashes(data)
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
if !f.DisableTimestamp {
data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
}
data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
}
return append(serialized, '\n'), nil
}


@ -1,308 +0,0 @@
package logrus
import (
"io"
"os"
"sync"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
// file, or leave it at the default, which is `os.Stderr`. You can also set this to
// something more adventurous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
Hooks LevelHooks
// All log entries pass through the formatter before being logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
// logs with colors, but to a file it wouldn't. You can easily implement your
// own that implements the `Formatter` interface, see the `README` or included
// formatters for examples.
Formatter Formatter
// The logging level the logger should log at. This is typically (and defaults
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful when debugging, as it enables very verbose logging.
Level Level
// Used to sync writing to the log. Locking is enabled by default.
mu MutexWrap
// Reusable empty entry
entryPool sync.Pool
}
type MutexWrap struct {
lock sync.Mutex
disabled bool
}
func (mw *MutexWrap) Lock() {
if !mw.disabled {
mw.lock.Lock()
}
}
func (mw *MutexWrap) Unlock() {
if !mw.disabled {
mw.lock.Unlock()
}
}
func (mw *MutexWrap) Disable() {
mw.disabled = true
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
// `Out` and `Hooks` directly on the default logger instance. You can also just
// instantiate your own:
//
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
// Hooks: make(LevelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
Out: os.Stderr,
Formatter: new(TextFormatter),
Hooks: make(LevelHooks),
Level: InfoLevel,
}
}
func (logger *Logger) newEntry() *Entry {
entry, ok := logger.entryPool.Get().(*Entry)
if ok {
return entry
}
return NewEntry(logger)
}
func (logger *Logger) releaseEntry(entry *Entry) {
logger.entryPool.Put(entry)
}
// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithFields(fields)
}
// Add an error as single field to the log entry. All it does is call
// `WithError` for the given `error`.
func (logger *Logger) WithError(err error) *Entry {
entry := logger.newEntry()
defer logger.releaseEntry(entry)
return entry.WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
if logger.Level >= DebugLevel {
entry := logger.newEntry()
entry.Debugf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infof(format string, args ...interface{}) {
if logger.Level >= InfoLevel {
entry := logger.newEntry()
entry.Infof(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Printf(format string, args ...interface{}) {
entry := logger.newEntry()
entry.Printf(format, args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warnf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
if logger.Level >= ErrorLevel {
entry := logger.newEntry()
entry.Errorf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
if logger.Level >= FatalLevel {
entry := logger.newEntry()
entry.Fatalf(format, args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
if logger.Level >= PanicLevel {
entry := logger.newEntry()
entry.Panicf(format, args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debug(args ...interface{}) {
if logger.Level >= DebugLevel {
entry := logger.newEntry()
entry.Debug(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Info(args ...interface{}) {
if logger.Level >= InfoLevel {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Print(args ...interface{}) {
entry := logger.newEntry()
entry.Info(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warn(args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warning(args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warn(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Error(args ...interface{}) {
if logger.Level >= ErrorLevel {
entry := logger.newEntry()
entry.Error(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatal(args ...interface{}) {
if logger.Level >= FatalLevel {
entry := logger.newEntry()
entry.Fatal(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
if logger.Level >= PanicLevel {
entry := logger.newEntry()
entry.Panic(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Debugln(args ...interface{}) {
if logger.Level >= DebugLevel {
entry := logger.newEntry()
entry.Debugln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Infoln(args ...interface{}) {
if logger.Level >= InfoLevel {
entry := logger.newEntry()
entry.Infoln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Println(args ...interface{}) {
entry := logger.newEntry()
entry.Println(args...)
logger.releaseEntry(entry)
}
func (logger *Logger) Warnln(args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Warningln(args ...interface{}) {
if logger.Level >= WarnLevel {
entry := logger.newEntry()
entry.Warnln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Errorln(args ...interface{}) {
if logger.Level >= ErrorLevel {
entry := logger.newEntry()
entry.Errorln(args...)
logger.releaseEntry(entry)
}
}
func (logger *Logger) Fatalln(args ...interface{}) {
if logger.Level >= FatalLevel {
entry := logger.newEntry()
entry.Fatalln(args...)
logger.releaseEntry(entry)
}
Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
if logger.Level >= PanicLevel {
entry := logger.newEntry()
entry.Panicln(args...)
logger.releaseEntry(entry)
}
}
// When the file is opened in append mode, it's safe to
// write to it concurrently (within 4k messages on Linux).
// In these cases the user can choose to disable the lock.
func (logger *Logger) SetNoLock() {
logger.mu.Disable()
}


@ -1,143 +0,0 @@
package logrus
import (
"fmt"
"log"
"strings"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
type Level uint8
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
switch level {
case DebugLevel:
return "debug"
case InfoLevel:
return "info"
case WarnLevel:
return "warning"
case ErrorLevel:
return "error"
case FatalLevel:
return "fatal"
case PanicLevel:
return "panic"
}
return "unknown"
}
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
switch strings.ToLower(lvl) {
case "panic":
return PanicLevel, nil
case "fatal":
return FatalLevel, nil
case "error":
return ErrorLevel, nil
case "warn", "warning":
return WarnLevel, nil
case "info":
return InfoLevel, nil
case "debug":
return DebugLevel, nil
}
var l Level
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
// A constant exposing all logging levels
var AllLevels = []Level{
PanicLevel,
FatalLevel,
ErrorLevel,
WarnLevel,
InfoLevel,
DebugLevel,
}
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
// PanicLevel level, highest level of severity. Logs and then calls panic with the
// message passed to Debug, Info, ...
PanicLevel Level = iota
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
// logging level is set to Panic.
FatalLevel
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
// Commonly used for hooks to send errors to an error tracking service.
ErrorLevel
// WarnLevel level. Non-critical entries that deserve eyes.
WarnLevel
// InfoLevel level. General operational entries about what's going on inside the
// application.
InfoLevel
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
DebugLevel
)
// Won't compile if StdLogger can't be realized by a log.Logger
var (
_ StdLogger = &log.Logger{}
_ StdLogger = &Entry{}
_ StdLogger = &Logger{}
)
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
// interface; this is the closest we get, unfortunately.
type StdLogger interface {
Print(...interface{})
Printf(string, ...interface{})
Println(...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
Fatalln(...interface{})
Panic(...interface{})
Panicf(string, ...interface{})
Panicln(...interface{})
}
// The FieldLogger interface generalizes the Entry and Logger types
type FieldLogger interface {
WithField(key string, value interface{}) *Entry
WithFields(fields Fields) *Entry
WithError(err error) *Entry
Debugf(format string, args ...interface{})
Infof(format string, args ...interface{})
Printf(format string, args ...interface{})
Warnf(format string, args ...interface{})
Warningf(format string, args ...interface{})
Errorf(format string, args ...interface{})
Fatalf(format string, args ...interface{})
Panicf(format string, args ...interface{})
Debug(args ...interface{})
Info(args ...interface{})
Print(args ...interface{})
Warn(args ...interface{})
Warning(args ...interface{})
Error(args ...interface{})
Fatal(args ...interface{})
Panic(args ...interface{})
Debugln(args ...interface{})
Infoln(args ...interface{})
Println(args ...interface{})
Warnln(args ...interface{})
Warningln(args ...interface{})
Errorln(args ...interface{})
Fatalln(args ...interface{})
Panicln(args ...interface{})
}


@ -1,8 +0,0 @@
// +build appengine
package logrus
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
return true
}


@ -1,10 +0,0 @@
// +build darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import "syscall"
const ioctlReadTermios = syscall.TIOCGETA
type Termios syscall.Termios


@ -1,14 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !appengine
package logrus
import "syscall"
const ioctlReadTermios = syscall.TCGETS
type Termios syscall.Termios


@ -1,22 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build linux darwin freebsd openbsd netbsd dragonfly
// +build !appengine
package logrus
import (
"syscall"
"unsafe"
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var termios Termios
_, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
return err == 0
}


@ -1,15 +0,0 @@
// +build solaris,!appengine
package logrus
import (
"os"
"golang.org/x/sys/unix"
)
// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
_, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA)
return err == nil
}


@ -1,27 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build windows,!appengine
package logrus
import (
"syscall"
"unsafe"
)
var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)
// IsTerminal returns true if stderr's file descriptor is a terminal.
func IsTerminal() bool {
fd := syscall.Stderr
var st uint32
r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
return r != 0 && e == 0
}


@ -1,170 +0,0 @@
package logrus
import (
"bytes"
"fmt"
"runtime"
"sort"
"strings"
"time"
)
const (
nocolor = 0
red = 31
green = 32
yellow = 33
blue = 34
gray = 37
)
var (
baseTimestamp time.Time
isTerminal bool
)
func init() {
baseTimestamp = time.Now()
isTerminal = IsTerminal()
}
func miniTS() int {
return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
ForceColors bool
// Force disabling colors.
DisableColors bool
// Disable timestamp logging. useful when output is redirected to logging
// system that already adds timestamps.
DisableTimestamp bool
// Enable logging the full timestamp when a TTY is attached instead of just
// the time passed since beginning of execution.
FullTimestamp bool
// TimestampFormat to use for display when a full timestamp is printed
TimestampFormat string
// The fields are sorted by default for a consistent output. For applications
// that log extremely frequently and don't use the JSON formatter this may not
// be desired.
DisableSorting bool
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
var b *bytes.Buffer
var keys []string = make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
if !f.DisableSorting {
sort.Strings(keys)
}
if entry.Buffer != nil {
b = entry.Buffer
} else {
b = &bytes.Buffer{}
}
prefixFieldClashes(entry.Data)
isColorTerminal := isTerminal && (runtime.GOOS != "windows")
isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors
timestampFormat := f.TimestampFormat
if timestampFormat == "" {
timestampFormat = DefaultTimestampFormat
}
if isColored {
f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
if entry.Message != "" {
f.appendKeyValue(b, "msg", entry.Message)
}
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
}
b.WriteByte('\n')
return b.Bytes(), nil
}
func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
case DebugLevel:
levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
levelColor = red
default:
levelColor = blue
}
levelText := strings.ToUpper(entry.Level.String())[0:4]
if f.DisableTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
} else if !f.FullTimestamp {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
} else {
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
}
for _, k := range keys {
v := entry.Data[k]
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
f.appendValue(b, v)
}
}
func needsQuoting(text string) bool {
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
(ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
return true
}
}
return false
}
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
b.WriteString(key)
b.WriteByte('=')
f.appendValue(b, value)
b.WriteByte(' ')
}
func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
switch value := value.(type) {
case string:
if !needsQuoting(value) {
b.WriteString(value)
} else {
fmt.Fprintf(b, "%q", value)
}
case error:
errmsg := value.Error()
if !needsQuoting(errmsg) {
b.WriteString(errmsg)
} else {
fmt.Fprintf(b, "%q", errmsg)
}
default:
fmt.Fprint(b, value)
}
}


@ -1,53 +0,0 @@
package logrus
import (
"bufio"
"io"
"runtime"
)
func (logger *Logger) Writer() *io.PipeWriter {
return logger.WriterLevel(InfoLevel)
}
func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
reader, writer := io.Pipe()
var printFunc func(args ...interface{})
switch level {
case DebugLevel:
printFunc = logger.Debug
case InfoLevel:
printFunc = logger.Info
case WarnLevel:
printFunc = logger.Warn
case ErrorLevel:
printFunc = logger.Error
case FatalLevel:
printFunc = logger.Fatal
case PanicLevel:
printFunc = logger.Panic
default:
printFunc = logger.Print
}
go logger.writerScanner(reader, printFunc)
runtime.SetFinalizer(writer, writerFinalizer)
return writer
}
func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
scanner := bufio.NewScanner(reader)
for scanner.Scan() {
printFunc(scanner.Text())
}
if err := scanner.Err(); err != nil {
logger.Errorf("Error while reading from Writer: %s", err)
}
reader.Close()
}
func writerFinalizer(writer *io.PipeWriter) {
writer.Close()
}

1
vendor/github.com/Xe/gopreload/.gitignore generated vendored Normal file

@ -0,0 +1 @@
*.so

19
vendor/github.com/Xe/gopreload/LICENSE generated vendored Normal file

@ -0,0 +1,19 @@
Copyright (c) 2017 Christine Dodrill <me@christine.website>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

51
vendor/github.com/Xe/gopreload/README.md generated vendored Normal file

@ -0,0 +1,51 @@
gopreload
=========
An emulation of the Linux libc `LD_PRELOAD`, except for use with Go plugins, for
the addition of instrumentation and debugging utilities.
## Pluginizer
`pluginizer` is a bit of glue that makes it easier to turn underscore imports
into plugins:
```console
$ go get github.com/Xe/gopreload/cmd/pluginizer
$ pluginizer -help
Usage of pluginizer:
-dest string
destination package to generate
-pkg string
package to underscore import
$ pluginizer -pkg github.com/lib/pq -dest github.com/Xe/gopreload/database/postgres
To build this plugin:
$ go build -buildmode plugin -o /path/to/output.so github.com/Xe/gopreload/database/postgres
```
### Database drivers
I have included plugin boilerplate autogenned versions of the sqlite, postgres
and mysql database drivers.
## Manhole
[`manhole`][manhole] is an example of debugging and introspection tooling that has
been useful when debugging past issues with long-running server processes.
## Security Implications
This package assumes that programs run using it are never started with environment
variables that are set by unauthenticated users. Any errors in loading the plugins
will be logged using the standard library logger `log` and ignored.
This has about the same security implications as [`LD_PRELOAD`][ld-preload] does in most
Linux distributions, but the risk is minimal compared to the massive benefit of
being able to dig into arbitrary background services with the same tooling, or of
separating metric submission completely from the backend metric creation. Common
logging setup processes can _always_ be loaded, making the default logger settings
the correct settings.
---
[manhole]: https://github.com/Xe/gopreload/tree/master/manhole
[ld-preload]: https://rafalcieslak.wordpress.com/2013/04/02/dynamic-linker-tricks-using-ld_preload-to-cheat-inject-features-and-investigate-programs/
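As a rough sketch of what the preload mechanism described above amounts to (assuming a
single plugin path arrives via the `GO_PRELOAD` environment variable used in the manhole
README below; the real implementation may differ):

```go
package preloadsketch

import (
	"log"
	"os"
	"plugin"
)

func init() {
	// Hypothetical sketch only; the real gopreload loader may differ.
	path := os.Getenv("GO_PRELOAD")
	if path == "" {
		return
	}
	// Errors are logged with the standard library logger and ignored,
	// as described in the "Security Implications" section above.
	if _, err := plugin.Open(path); err != nil {
		log.Printf("gopreload: cannot open %s: %v", path, err)
	}
}
```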

56
vendor/github.com/Xe/gopreload/cmd/pluginizer/main.go generated vendored Normal file

@ -0,0 +1,56 @@
package main
import (
"flag"
"fmt"
"log"
"os"
"path/filepath"
_ "github.com/Xe/gopreload"
)
var (
pkgName = flag.String("pkg", "", "package to underscore import")
destPkgName = flag.String("dest", "", "destination package to generate")
)
const codeTemplate = `//+build go1.8
package main
import _ "$PACKAGE_PATH"`
func main() {
flag.Parse()
if *pkgName == "" || *destPkgName == "" {
log.Fatal("must set -pkg and -dest")
}
srcDir := filepath.Join(os.Getenv("GOPATH"), "src", *destPkgName)
err := os.MkdirAll(srcDir, os.ModePerm)
if err != nil {
log.Fatal(err)
}
fout, err := os.Create(srcDir + "/main.go")
if err != nil {
log.Fatal(err)
}
defer fout.Close()
codeBody := os.Expand(codeTemplate, func(s string) string {
if s == "PACKAGE_PATH" {
return *pkgName
}
return "no idea man"
})
fmt.Fprintln(fout, codeBody)
fmt.Println("To build this plugin: ")
fmt.Println(" $ go build -buildmode plugin -o /path/to/output.so " + *destPkgName)
}


@ -0,0 +1,5 @@
//+build go1.8
package main
import _ "github.com/go-sql-driver/mysql"


@ -0,0 +1,5 @@
//+build go1.8
package main
import _ "github.com/lib/pq"


@ -0,0 +1,5 @@
//+build go1.8
package main
import _ "github.com/mattn/go-sqlite3"

56
vendor/github.com/Xe/gopreload/manhole/README.md generated vendored Normal file

@ -0,0 +1,56 @@
# manhole
An opinionated HTTP manhole into Go processes.
## Assumptions This Package Makes
- Make each server instance have a unique HTTP port that is randomized by default.
This makes it very hard to accidentally route this manhole to the outside world.
If more assurance is required, I personally suggest using [yubikey totp][yktotp],
but do your research.
- Application code does not touch [`http.DefaultServeMux`][default-servemux]. This is so that
administrative control rods can be dynamically flipped in case they are
needed.
- [pprof][pprof] endpoints added to `http.DefaultServeMux`. This allows easy
access to [pprof runtime tracing][pprof] to debug issues on long-running
applications like HTTP services.
- Make the manhole slightly inconvenient to put into place in production. This
helps make sure that this tool remains a debugging tool and not a part of a
long-term production rollout.
## Usage
Compile this as a plugin:
```console
$ go get -d github.com/Xe/gopreload/manhole
$ go build -buildmode plugin -o manhole.so github.com/Xe/gopreload/manhole
```
Then add [`gopreload`][gopreload] to your application:
```go
// gopreload.go
package main
/*
This file is separate to make it very easy to both add into an application, but
also very easy to remove.
*/
import _ "github.com/Xe/gopreload"
```
And at runtime add the `manhole.so` file you created earlier to the target system
somehow and add the following environment variable to its run configuration:
```sh
GO_PRELOAD=/path/to/manhole.so
```
---
[pprof]: https://godoc.org/net/http/pprof
[default-servemux]: https://godoc.org/net/http#pkg-variables
[yktotp]: https://github.com/GeertJohan/yubigo
[gopreload]: https://github.com/Xe/gopreload
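Once the plugin is loaded, `manhole/server.go` (below) logs the randomly chosen listen
address; since `net/http/pprof` registers its handlers on `http.DefaultServeMux`, they
are then reachable under `/debug/pprof/` at that address, for example (the port shown
here is made up):

```console
$ curl http://127.0.0.2:38213/debug/pprof/
```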

22
vendor/github.com/Xe/gopreload/manhole/server.go generated vendored Normal file

@ -0,0 +1,22 @@
package main
import (
"log"
"net"
"net/http"
_ "net/http/pprof"
"net/rpc"
)
func init() {
l, err := net.Listen("tcp", "127.0.0.2:0")
if err != nil {
log.Printf("manhole: cannot bind to 127.0.0.2:0: %v", err)
return
}
log.Printf("manhole: Now listening on http://%s", l.Addr())
rpc.HandleHTTP()
go http.Serve(l, nil)
}

55
vendor/github.com/Xe/gopreload/sample/main.go generated vendored Normal file

@ -0,0 +1,55 @@
package main
import (
"fmt"
"math/rand"
"net/http"
"runtime"
"time"
_ "github.com/Xe/gopreload"
"github.com/Xe/ln"
)
func main() {
http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
spew()
ln.Log(ln.F{"action": "gc_spew", "who": r.RemoteAddr})
fmt.Fprintln(w, "done")
})
http.ListenAndServe(":9184", nil)
}
func makeBuffer() []byte {
return make([]byte, rand.Intn(5000000)+5000000)
}
func spew() {
pool := make([][]byte, 20)
var m runtime.MemStats
makes := 0
for _ = range make([]struct{}, 50) {
b := makeBuffer()
makes += 1
i := rand.Intn(len(pool))
pool[i] = b
time.Sleep(time.Millisecond * 250)
bytes := 0
for i := 0; i < len(pool); i++ {
if pool[i] != nil {
bytes += len(pool[i])
}
}
runtime.ReadMemStats(&m)
fmt.Printf("%d,%d,%d,%d,%d,%d\n", m.HeapSys, bytes, m.HeapAlloc,
m.HeapIdle, m.HeapReleased, makes)
}
}


@ -0,0 +1,36 @@
package main
import (
"runtime"
"time"
"github.com/Xe/ln"
)
func init() {
ln.Log(ln.F{
"action": "started_up",
"every": "20_seconds",
"what": "gc_metrics",
})
go func() {
for {
time.Sleep(20 * time.Second)
gatherMetrics()
}
}()
}
func gatherMetrics() {
stats := &runtime.MemStats{}
runtime.ReadMemStats(stats)
ln.Log(ln.F{
"gc-collections": stats.NumGC,
"gc-stw-pause-total": stats.PauseTotalNs,
"live-object-count": stats.Mallocs - stats.Frees,
"heap-bytes": stats.Alloc,
"stack-bytes": stats.StackInuse,
})
}

25
vendor/github.com/Xe/ln/LICENSE generated vendored Normal file

@ -0,0 +1,25 @@
Copyright (c) 2015, Andrew Gwozdziewycz
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

29
vendor/github.com/Xe/ln/README.md generated vendored Normal file

@ -0,0 +1,29 @@
# ln: The Natural Logger for Go
`ln` provides a simple interface to logging and metrics, and
obviates the need to utilize purpose-built metrics packages, like
`go-metrics`, for simple use cases.
The design of `ln` centers around the idea of key-value pairs, which
can be interpreted on the fly by "Filters" to do things such as
aggregating metrics and reporting them to, say, Librato or
statsd.
"Filters" are like WSGI, or Rack Middleware. They are run "top down"
and can abort an emitted log's output at any time, or continue to let
it through the chain. However, the interface is slightly different
than that. Rather than encapsulating the chain with partial function
application, we utilize a simpler method, namely, each plugin defines
an `Apply` function, which takes as an argument the log event, and
performs the work of the plugin, only if the Plugin "Applies" to this
log event.
If `Apply` returns `false`, the iteration through the rest of the
filters is aborted, and the log is dropped from further processing.
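As a rough illustration of that contract (the `Event` type and the exact `Filter`
method set below are assumptions for the sake of example; the real interfaces in this
package may differ):

```go
package lnsketch

// Event stands in for the log event passed down the filter chain; the
// concrete type in ln may differ.
type Event struct {
	Data map[string]interface{}
}

// Filter mirrors the contract described above: Apply performs the filter's
// work and reports whether processing should continue down the chain.
type Filter interface {
	Apply(e Event) bool
}

// requireAction drops any event that carries no "action" key.
type requireAction struct{}

func (requireAction) Apply(e Event) bool {
	_, ok := e.Data["action"]
	return ok // returning false aborts the chain and drops the log
}
```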
## Current Status: Initial Development / Concept
## Copyright
(c) 2015, Andrew Gwozdziewycz, BSD Licensed. See LICENSE for more
info.

51
vendor/github.com/Xe/ln/example/http.go generated vendored Normal file

@ -0,0 +1,51 @@
// +build ignore
package main
import (
"context"
"flag"
"net/http"
"time"
"github.com/Xe/ln"
"github.com/Xe/ln/ex"
"github.com/facebookgo/flagenv"
"golang.org/x/net/trace"
)
var (
port = flag.String("port", "2145", "http port to listen on")
tracingFamily = flag.String("trace-family", "ln example", "tracing family to use for x/net/trace")
)
func main() {
flagenv.Parse()
flag.Parse()
ln.DefaultLogger.Filters = append(ln.DefaultLogger.Filters, ex.NewGoTraceLogger())
http.HandleFunc("/", handleIndex)
http.ListenAndServe(":"+*port, middlewareSpan(ex.HTTPLog(http.DefaultServeMux)))
}
func middlewareSpan(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
sp := trace.New(*tracingFamily, "HTTP request")
defer sp.Finish()
ctx, cancel := context.WithTimeout(r.Context(), 30*time.Second)
defer cancel()
ctx = trace.NewContext(ctx, sp)
next.ServeHTTP(w, r.WithContext(ctx))
})
}
func handleIndex(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
ln.Log(ctx, ln.Action("index"), ln.F{"there_is": "no_danger"})
http.Error(w, "There is no danger citizen", http.StatusOK)
}

111
vendor/github.com/Xe/ln/logger_test.go generated vendored Normal file

@ -0,0 +1,111 @@
package ln
import (
"bytes"
"context"
"fmt"
"testing"
"time"
)
var ctx context.Context
func setup(t *testing.T) (*bytes.Buffer, func()) {
ctx = context.Background()
out := bytes.Buffer{}
oldFilters := DefaultLogger.Filters
DefaultLogger.Filters = []Filter{NewWriterFilter(&out, nil)}
return &out, func() {
DefaultLogger.Filters = oldFilters
}
}
func TestSimpleError(t *testing.T) {
out, teardown := setup(t)
defer teardown()
Log(ctx, F{"err": fmt.Errorf("This is an Error!!!")}, F{"msg": "fooey", "bar": "foo"})
data := []string{
`err="This is an Error!!!"`,
`fooey`,
`bar=foo`,
}
for _, line := range data {
if !bytes.Contains(out.Bytes(), []byte(line)) {
t.Fatalf("Bytes: %s not in %s", line, out.Bytes())
}
}
}
func TestTimeConversion(t *testing.T) {
out, teardown := setup(t)
defer teardown()
var zeroTime time.Time
Log(ctx, F{"zero": zeroTime})
data := []string{
`zero=0001-01-01T00:00:00Z`,
}
for _, line := range data {
if !bytes.Contains(out.Bytes(), []byte(line)) {
t.Fatalf("Bytes: %s not in %s", line, out.Bytes())
}
}
}
func TestDebug(t *testing.T) {
out, teardown := setup(t)
defer teardown()
// set priority to Debug
Error(ctx, fmt.Errorf("This is an Error!!!"), F{})
data := []string{
`err="This is an Error!!!"`,
`_lineno=`,
`_function=ln.TestDebug`,
`_filename=github.com/Xe/ln/logger_test.go`,
`cause="This is an Error!!!"`,
}
for _, line := range data {
if !bytes.Contains(out.Bytes(), []byte(line)) {
t.Fatalf("Bytes: %s not in %s", line, out.Bytes())
}
}
}
func TestFer(t *testing.T) {
out, teardown := setup(t)
defer teardown()
underTest := foobar{Foo: 1, Bar: "quux"}
Log(ctx, underTest)
data := []string{
`foo=1`,
`bar=quux`,
}
for _, line := range data {
if !bytes.Contains(out.Bytes(), []byte(line)) {
t.Fatalf("Bytes: %s not in %s", line, out.Bytes())
}
}
}
type foobar struct {
Foo int
Bar string
}
func (f foobar) F() F {
return F{
"foo": f.Foo,
"bar": f.Bar,
}
}

2
vendor/github.com/Xe/uuid/CONTRIBUTORS generated vendored Normal file

@ -0,0 +1,2 @@
Paul Borman <borman@google.com>
Christine Dodrill <xena@yolo-swag.com>

27
vendor/github.com/Xe/uuid/LICENSE generated vendored Normal file

@ -0,0 +1,27 @@
Copyright (c) 2009 Google Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

5
vendor/github.com/Xe/uuid/README.md generated vendored Normal file

@ -0,0 +1,5 @@
go-uuid
=======
code.google.com is going away and I use this library a lot. It used to live at
https://code.google.com/p/go-uuid/ but now I take care of it.

390
vendor/github.com/Xe/uuid/uuid_test.go generated vendored Normal file

@ -0,0 +1,390 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"fmt"
"os"
"strings"
"testing"
"time"
)
type test struct {
in string
version Version
variant Variant
isuuid bool
}
var tests = []test{
{"f47ac10b-58cc-0372-8567-0e02b2c3d479", 0, RFC4122, true},
{"f47ac10b-58cc-1372-8567-0e02b2c3d479", 1, RFC4122, true},
{"f47ac10b-58cc-2372-8567-0e02b2c3d479", 2, RFC4122, true},
{"f47ac10b-58cc-3372-8567-0e02b2c3d479", 3, RFC4122, true},
{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-5372-8567-0e02b2c3d479", 5, RFC4122, true},
{"f47ac10b-58cc-6372-8567-0e02b2c3d479", 6, RFC4122, true},
{"f47ac10b-58cc-7372-8567-0e02b2c3d479", 7, RFC4122, true},
{"f47ac10b-58cc-8372-8567-0e02b2c3d479", 8, RFC4122, true},
{"f47ac10b-58cc-9372-8567-0e02b2c3d479", 9, RFC4122, true},
{"f47ac10b-58cc-a372-8567-0e02b2c3d479", 10, RFC4122, true},
{"f47ac10b-58cc-b372-8567-0e02b2c3d479", 11, RFC4122, true},
{"f47ac10b-58cc-c372-8567-0e02b2c3d479", 12, RFC4122, true},
{"f47ac10b-58cc-d372-8567-0e02b2c3d479", 13, RFC4122, true},
{"f47ac10b-58cc-e372-8567-0e02b2c3d479", 14, RFC4122, true},
{"f47ac10b-58cc-f372-8567-0e02b2c3d479", 15, RFC4122, true},
{"urn:uuid:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
{"URN:UUID:f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-0567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-1567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-2567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-3567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-4567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-5567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-6567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-7567-0e02b2c3d479", 4, Reserved, true},
{"f47ac10b-58cc-4372-8567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-4372-9567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-4372-a567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-4372-b567-0e02b2c3d479", 4, RFC4122, true},
{"f47ac10b-58cc-4372-c567-0e02b2c3d479", 4, Microsoft, true},
{"f47ac10b-58cc-4372-d567-0e02b2c3d479", 4, Microsoft, true},
{"f47ac10b-58cc-4372-e567-0e02b2c3d479", 4, Future, true},
{"f47ac10b-58cc-4372-f567-0e02b2c3d479", 4, Future, true},
{"f47ac10b158cc-5372-a567-0e02b2c3d479", 0, Invalid, false},
{"f47ac10b-58cc25372-a567-0e02b2c3d479", 0, Invalid, false},
{"f47ac10b-58cc-53723a567-0e02b2c3d479", 0, Invalid, false},
{"f47ac10b-58cc-5372-a56740e02b2c3d479", 0, Invalid, false},
{"f47ac10b-58cc-5372-a567-0e02-2c3d479", 0, Invalid, false},
{"g47ac10b-58cc-4372-a567-0e02b2c3d479", 0, Invalid, false},
}
var constants = []struct {
c interface{}
name string
}{
{Person, "Person"},
{Group, "Group"},
{Org, "Org"},
{Invalid, "Invalid"},
{RFC4122, "RFC4122"},
{Reserved, "Reserved"},
{Microsoft, "Microsoft"},
{Future, "Future"},
{Domain(17), "Domain17"},
{Variant(42), "BadVariant42"},
}
func testTest(t *testing.T, in string, tt test) {
uuid := Parse(in)
if ok := (uuid != nil); ok != tt.isuuid {
t.Errorf("Parse(%s) got %v expected %v\b", in, ok, tt.isuuid)
}
if uuid == nil {
return
}
if v := uuid.Variant(); v != tt.variant {
t.Errorf("Variant(%s) got %d expected %d\b", in, v, tt.variant)
}
if v, _ := uuid.Version(); v != tt.version {
t.Errorf("Version(%s) got %d expected %d\b", in, v, tt.version)
}
}
func TestUUID(t *testing.T) {
for _, tt := range tests {
testTest(t, tt.in, tt)
testTest(t, strings.ToUpper(tt.in), tt)
}
}
func TestConstants(t *testing.T) {
for x, tt := range constants {
v, ok := tt.c.(fmt.Stringer)
if !ok {
t.Errorf("%x: %v: not a stringer", x, v)
} else if s := v.String(); s != tt.name {
v, _ := tt.c.(int)
t.Errorf("%x: Constant %T:%d gives %q, expected %q\n", x, tt.c, v, s, tt.name)
}
}
}
func TestRandomUUID(t *testing.T) {
m := make(map[string]bool)
for x := 1; x < 32; x++ {
uuid := NewRandom()
s := uuid.String()
if m[s] {
t.Errorf("NewRandom returned duplicated UUID %s\n", s)
}
m[s] = true
if v, _ := uuid.Version(); v != 4 {
t.Errorf("Random UUID of version %s\n", v)
}
if uuid.Variant() != RFC4122 {
t.Errorf("Random UUID is variant %d\n", uuid.Variant())
}
}
}
func TestNew(t *testing.T) {
m := make(map[string]bool)
for x := 1; x < 32; x++ {
s := New()
if m[s] {
t.Errorf("New returned duplicated UUID %s\n", s)
}
m[s] = true
uuid := Parse(s)
if uuid == nil {
t.Errorf("New returned %q which does not decode\n", s)
continue
}
if v, _ := uuid.Version(); v != 4 {
t.Errorf("Random UUID of version %s\n", v)
}
if uuid.Variant() != RFC4122 {
t.Errorf("Random UUID is variant %d\n", uuid.Variant())
}
}
}
func clockSeq(t *testing.T, uuid UUID) int {
seq, ok := uuid.ClockSequence()
if !ok {
t.Fatalf("%s: invalid clock sequence\n", uuid)
}
return seq
}
func TestClockSeq(t *testing.T) {
// Fake time.Now for this test to return a monotonically advancing time; restore it at end.
defer func(orig func() time.Time) { timeNow = orig }(timeNow)
monTime := time.Now()
timeNow = func() time.Time {
monTime = monTime.Add(1 * time.Second)
return monTime
}
SetClockSequence(-1)
uuid1 := NewUUID()
uuid2 := NewUUID()
if clockSeq(t, uuid1) != clockSeq(t, uuid2) {
t.Errorf("clock sequence %d != %d\n", clockSeq(t, uuid1), clockSeq(t, uuid2))
}
SetClockSequence(-1)
uuid2 = NewUUID()
// Just on the very off chance we generated the same sequence
// two times we try again.
if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
SetClockSequence(-1)
uuid2 = NewUUID()
}
if clockSeq(t, uuid1) == clockSeq(t, uuid2) {
t.Errorf("Duplicate clock sequence %d\n", clockSeq(t, uuid1))
}
SetClockSequence(0x1234)
uuid1 = NewUUID()
if seq := clockSeq(t, uuid1); seq != 0x1234 {
t.Errorf("%s: expected seq 0x1234 got 0x%04x\n", uuid1, seq)
}
}
func TestCoding(t *testing.T) {
text := "7d444840-9dc0-11d1-b245-5ffdce74fad2"
urn := "urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2"
data := UUID{
0x7d, 0x44, 0x48, 0x40,
0x9d, 0xc0,
0x11, 0xd1,
0xb2, 0x45,
0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2,
}
if v := data.String(); v != text {
t.Errorf("%x: encoded to %s, expected %s\n", data, v, text)
}
if v := data.URN(); v != urn {
t.Errorf("%x: urn is %s, expected %s\n", data, v, urn)
}
uuid := Parse(text)
if !Equal(uuid, data) {
t.Errorf("%s: decoded to %s, expected %s\n", text, uuid, data)
}
}
func TestVersion1(t *testing.T) {
uuid1 := NewUUID()
uuid2 := NewUUID()
if Equal(uuid1, uuid2) {
t.Errorf("%s:duplicate uuid\n", uuid1)
}
if v, _ := uuid1.Version(); v != 1 {
t.Errorf("%s: version %s expected 1\n", uuid1, v)
}
if v, _ := uuid2.Version(); v != 1 {
t.Errorf("%s: version %s expected 1\n", uuid2, v)
}
n1 := uuid1.NodeID()
n2 := uuid2.NodeID()
if !bytes.Equal(n1, n2) {
t.Errorf("Different nodes %x != %x\n", n1, n2)
}
t1, ok := uuid1.Time()
if !ok {
t.Errorf("%s: invalid time\n", uuid1)
}
t2, ok := uuid2.Time()
if !ok {
t.Errorf("%s: invalid time\n", uuid2)
}
q1, ok := uuid1.ClockSequence()
if !ok {
t.Errorf("%s: invalid clock sequence\n", uuid1)
}
q2, ok := uuid2.ClockSequence()
if !ok {
t.Errorf("%s: invalid clock sequence", uuid2)
}
switch {
case t1 == t2 && q1 == q2:
t.Errorf("time stopped\n")
case t1 > t2 && q1 == q2:
t.Errorf("time reversed\n")
case t1 < t2 && q1 != q2:
t.Errorf("clock sequence chaned unexpectedly\n")
}
}
func TestNodeAndTime(t *testing.T) {
// Time is February 5, 1998 12:30:23.136364800 AM GMT
uuid := Parse("7d444840-9dc0-11d1-b245-5ffdce74fad2")
node := []byte{0x5f, 0xfd, 0xce, 0x74, 0xfa, 0xd2}
ts, ok := uuid.Time()
if ok {
c := time.Unix(ts.UnixTime())
want := time.Date(1998, 2, 5, 0, 30, 23, 136364800, time.UTC)
if !c.Equal(want) {
t.Errorf("Got time %v, want %v", c, want)
}
} else {
t.Errorf("%s: bad time\n", uuid)
}
if !bytes.Equal(node, uuid.NodeID()) {
t.Errorf("Expected node %v got %v\n", node, uuid.NodeID())
}
}
func TestMD5(t *testing.T) {
uuid := NewMD5(NameSpace_DNS, []byte("python.org")).String()
want := "6fa459ea-ee8a-3ca4-894e-db77e160355e"
if uuid != want {
t.Errorf("MD5: got %q expected %q\n", uuid, want)
}
}
func TestSHA1(t *testing.T) {
uuid := NewSHA1(NameSpace_DNS, []byte("python.org")).String()
want := "886313e1-3b8a-5372-9b90-0c9aee199e5d"
if uuid != want {
t.Errorf("SHA1: got %q expected %q\n", uuid, want)
}
}
func TestNodeID(t *testing.T) {
nid := []byte{1, 2, 3, 4, 5, 6}
SetNodeInterface("")
s := NodeInterface()
if s == "" || s == "user" {
t.Errorf("NodeInterface %q after SetInteface\n", s)
}
node1 := NodeID()
if node1 == nil {
t.Errorf("NodeID nil after SetNodeInterface\n", s)
}
SetNodeID(nid)
s = NodeInterface()
if s != "user" {
t.Errorf("Expected NodeInterface %q got %q\n", "user", s)
}
node2 := NodeID()
if node2 == nil {
t.Errorf("NodeID nil after SetNodeID\n", s)
}
if bytes.Equal(node1, node2) {
t.Errorf("NodeID not changed after SetNodeID\n", s)
} else if !bytes.Equal(nid, node2) {
t.Errorf("NodeID is %x, expected %x\n", node2, nid)
}
}
func testDCE(t *testing.T, name string, uuid UUID, domain Domain, id uint32) {
if uuid == nil {
t.Errorf("%s failed\n", name)
return
}
if v, _ := uuid.Version(); v != 2 {
t.Errorf("%s: %s: expected version 2, got %s\n", name, uuid, v)
return
}
if v, ok := uuid.Domain(); !ok || v != domain {
if !ok {
t.Errorf("%s: %d: Domain failed\n", name, uuid)
} else {
t.Errorf("%s: %s: expected domain %d, got %d\n", name, uuid, domain, v)
}
}
if v, ok := uuid.Id(); !ok || v != id {
if !ok {
t.Errorf("%s: %d: Id failed\n", name, uuid)
} else {
t.Errorf("%s: %s: expected id %d, got %d\n", name, uuid, id, v)
}
}
}
func TestDCE(t *testing.T) {
testDCE(t, "NewDCESecurity", NewDCESecurity(42, 12345678), 42, 12345678)
testDCE(t, "NewDCEPerson", NewDCEPerson(), Person, uint32(os.Getuid()))
testDCE(t, "NewDCEGroup", NewDCEGroup(), Group, uint32(os.Getgid()))
}
type badRand struct{}
func (r badRand) Read(buf []byte) (int, error) {
for i := range buf {
buf[i] = byte(i)
}
return len(buf), nil
}
func TestBadRand(t *testing.T) {
SetRand(badRand{})
uuid1 := New()
uuid2 := New()
if uuid1 != uuid2 {
t.Errorf("execpted duplicates, got %q and %q\n", uuid1, uuid2)
}
SetRand(nil)
uuid1 = New()
uuid2 = New()
if uuid1 == uuid2 {
t.Errorf("unexecpted duplicates, got %q\n", uuid1)
}
}

26
vendor/github.com/Xe/x/.gitignore generated vendored Normal file

@ -0,0 +1,26 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe
*.test
*.prof
.env

6
vendor/github.com/Xe/x/BLESSING generated vendored Normal file

@ -0,0 +1,6 @@
The author disclaims copyright to this source code. In place of
a legal notice, here is a blessing:
May you do good and not evil.
May you find forgiveness for yourself and forgive others.
May you share freely, never taking more than you give.

121
vendor/github.com/Xe/x/LICENSE generated vendored Normal file

@ -0,0 +1,121 @@
Creative Commons Legal Code
CC0 1.0 Universal
CREATIVE COMMONS CORPORATION IS NOT A LAW FIRM AND DOES NOT PROVIDE
LEGAL SERVICES. DISTRIBUTION OF THIS DOCUMENT DOES NOT CREATE AN
ATTORNEY-CLIENT RELATIONSHIP. CREATIVE COMMONS PROVIDES THIS
INFORMATION ON AN "AS-IS" BASIS. CREATIVE COMMONS MAKES NO WARRANTIES
REGARDING THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS
PROVIDED HEREUNDER, AND DISCLAIMS LIABILITY FOR DAMAGES RESULTING FROM
THE USE OF THIS DOCUMENT OR THE INFORMATION OR WORKS PROVIDED
HEREUNDER.
Statement of Purpose
The laws of most jurisdictions throughout the world automatically confer
exclusive Copyright and Related Rights (defined below) upon the creator
and subsequent owner(s) (each and all, an "owner") of an original work of
authorship and/or a database (each, a "Work").
Certain owners wish to permanently relinquish those rights to a Work for
the purpose of contributing to a commons of creative, cultural and
scientific works ("Commons") that the public can reliably and without fear
of later claims of infringement build upon, modify, incorporate in other
works, reuse and redistribute as freely as possible in any form whatsoever
and for any purposes, including without limitation commercial purposes.
These owners may contribute to the Commons to promote the ideal of a free
culture and the further production of creative, cultural and scientific
works, or to gain reputation or greater distribution for their Work in
part through the use and efforts of others.
For these and/or other purposes and motivations, and without any
expectation of additional consideration or compensation, the person
associating CC0 with a Work (the "Affirmer"), to the extent that he or she
is an owner of Copyright and Related Rights in the Work, voluntarily
elects to apply CC0 to the Work and publicly distribute the Work under its
terms, with knowledge of his or her Copyright and Related Rights in the
Work and the meaning and intended legal effect of CC0 on those rights.
1. Copyright and Related Rights. A Work made available under CC0 may be
protected by copyright and related or neighboring rights ("Copyright and
Related Rights"). Copyright and Related Rights include, but are not
limited to, the following:
i. the right to reproduce, adapt, distribute, perform, display,
communicate, and translate a Work;
ii. moral rights retained by the original author(s) and/or performer(s);
iii. publicity and privacy rights pertaining to a person's image or
likeness depicted in a Work;
iv. rights protecting against unfair competition in regards to a Work,
subject to the limitations in paragraph 4(a), below;
v. rights protecting the extraction, dissemination, use and reuse of data
in a Work;
vi. database rights (such as those arising under Directive 96/9/EC of the
European Parliament and of the Council of 11 March 1996 on the legal
protection of databases, and under any national implementation
thereof, including any amended or successor version of such
directive); and
vii. other similar, equivalent or corresponding rights throughout the
world based on applicable law or treaty, and any national
implementations thereof.
2. Waiver. To the greatest extent permitted by, but not in contravention
of, applicable law, Affirmer hereby overtly, fully, permanently,
irrevocably and unconditionally waives, abandons, and surrenders all of
Affirmer's Copyright and Related Rights and associated claims and causes
of action, whether now known or unknown (including existing as well as
future claims and causes of action), in the Work (i) in all territories
worldwide, (ii) for the maximum duration provided by applicable law or
treaty (including future time extensions), (iii) in any current or future
medium and for any number of copies, and (iv) for any purpose whatsoever,
including without limitation commercial, advertising or promotional
purposes (the "Waiver"). Affirmer makes the Waiver for the benefit of each
member of the public at large and to the detriment of Affirmer's heirs and
successors, fully intending that such Waiver shall not be subject to
revocation, rescission, cancellation, termination, or any other legal or
equitable action to disrupt the quiet enjoyment of the Work by the public
as contemplated by Affirmer's express Statement of Purpose.
3. Public License Fallback. Should any part of the Waiver for any reason
be judged legally invalid or ineffective under applicable law, then the
Waiver shall be preserved to the maximum extent permitted taking into
account Affirmer's express Statement of Purpose. In addition, to the
extent the Waiver is so judged Affirmer hereby grants to each affected
person a royalty-free, non transferable, non sublicensable, non exclusive,
irrevocable and unconditional license to exercise Affirmer's Copyright and
Related Rights in the Work (i) in all territories worldwide, (ii) for the
maximum duration provided by applicable law or treaty (including future
time extensions), (iii) in any current or future medium and for any number
of copies, and (iv) for any purpose whatsoever, including without
limitation commercial, advertising or promotional purposes (the
"License"). The License shall be deemed effective as of the date CC0 was
applied by Affirmer to the Work. Should any part of the License for any
reason be judged legally invalid or ineffective under applicable law, such
partial invalidity or ineffectiveness shall not invalidate the remainder
of the License, and in such case Affirmer hereby affirms that he or she
will not (i) exercise any of his or her remaining Copyright and Related
Rights in the Work or (ii) assert any associated claims and causes of
action with respect to the Work, in either case contrary to Affirmer's
express Statement of Purpose.
4. Limitations and Disclaimers.
a. No trademark or patent rights held by Affirmer are waived, abandoned,
surrendered, licensed or otherwise affected by this document.
b. Affirmer offers the Work as-is and makes no representations or
warranties of any kind concerning the Work, express, implied,
statutory or otherwise, including without limitation warranties of
title, merchantability, fitness for a particular purpose, non
infringement, or the absence of latent or other defects, accuracy, or
the present or absence of errors, whether or not discoverable, all to
the greatest extent permissible under applicable law.
c. Affirmer disclaims responsibility for clearing rights of other persons
that may apply to the Work or any use thereof, including without
limitation any person's Copyright and Related Rights in the Work.
Further, Affirmer disclaims responsibility for obtaining any necessary
consents, permissions or other rights required for any use of the
Work.
d. Affirmer understands and acknowledges that Creative Commons is not a
party to this document and has no duty or obligation with respect to
this CC0 or use of the Work.

126
vendor/github.com/Xe/x/README.md generated vendored Normal file

@ -0,0 +1,126 @@
# tools
Various tools of mine in Go
Installing these tools
----------------------
To install any of these tools, type in:
```console
$ go get christine.website/go/tools/$toolname
```
For example:
```console
$ go get christine.website/go/tools/license
```
`dokku`
-------
This is a simple command line tool to interface with Dokku servers. This is
a port of my shell extension
[`dokku.zsh`](https://github.com/Xe/dotfiles/blob/master/.zsh/dokku.zsh) to
a nice Go binary.
This takes a configuration file for defining multiple servers:
```ini
[server "default"]
user = dokku
host = panel.apps.xeserv.us
sshkey = /.ssh/id_rsa
```
By default it assumes that the SSH key is `~/.ssh/id_rsa` and that the
username is `dokku`, and the server named `default` is used for command
execution.
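A second server is just another stanza in the same file; for example (the
server name and hostname below are made up for illustration):
```ini
[server "staging"]
user = dokku
host = staging.apps.xeserv.us
sshkey = /.ssh/id_rsa
```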
### TODO
- [ ] Allow interactive commands
- [ ] Directly pipe stdin and stdout to the ssh connection
---
`license`
---------
This is a simple command line tool to help users generate a license file based
on information they have already given their system, or that is easy for the
system to figure out on its own.
```console
$ license
Usage of license:
license [options] <license kind>
-email="": email of the person licensing the software
-name="": name of the person licensing the software
-out=false: write to a file instead of stdout
-show=false: show all licenses instead of generating one
By default the name and email are scraped from `git config`
```
```console
$ license -show
Licenses available:
zlib
unlicense
mit
apache
bsd-2
gpl-2
```
```console
$ license zlib
Copyright (c) 2015 Christine Dodrill <xena@yolo-swag.com>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgement in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
```
---
`ghstat`
--------
Command ghstat shows the status of GitHub via their status API.
Usage of ./ghstat:
-message=false: show last message?
This follows https://status.github.com/api for everything except the list of
all recent status messages.
```console
$ ghstat
Status: minor (Fri Mar 27 15:24:57 2015)
```
```console
$ ghstat -message
Last message:
Status: minor
Message: We've deployed our volumetric attack defenses against an extremely
large amount of traffic. Performance is stabilizing.
Time: Fri Mar 27 15:04:59 2015
```

3
vendor/github.com/Xe/x/farbfeld/README.md generated vendored Normal file

@ -0,0 +1,3 @@
# farbfeld filters
Filters and tools for http://tools.suckless.org/farbfeld/

43
vendor/github.com/Xe/x/farbfeld/ff-primitive/main.go generated vendored Normal file

@ -0,0 +1,43 @@
package main
import (
"flag"
"image"
"log"
"os"
"runtime"
"github.com/fogleman/primitive/primitive"
farbfeld "github.com/hullerob/go.farbfeld"
)
var (
shapeCount = flag.Int("count", 150, "number of shapes used")
repeatShapeCount = flag.Int("repeat-count", 0, "number of extra shapes drawn in each step")
alpha = flag.Int("alpha", 128, "alpha of all shapes")
)
func stepImg(img image.Image, count int) image.Image {
bg := primitive.MakeColor(primitive.AverageImageColor(img))
model := primitive.NewModel(img, bg, 512, runtime.NumCPU())
for range make([]struct{}, count) {
model.Step(primitive.ShapeTypeTriangle, *alpha, *repeatShapeCount)
}
return model.Context.Image()
}
func main() {
flag.Parse()
img, err := farbfeld.Decode(os.Stdin)
if err != nil {
log.Fatal(err)
}
err = farbfeld.Encode(os.Stdout, stepImg(img, *shapeCount))
if err != nil {
log.Fatal(err)
}
}


@ -0,0 +1,10 @@
b572f0728b691aae4256edb2e408279146eafe52 github.com/hullerob/go.farbfeld
ee8994ff90057955c428a5a949da5d064bf3ce6b github.com/fogleman/gg
80f39ceaa8f4c66acb28aba6abe6b15128c06113 github.com/fogleman/primitive/primitive
bcfeb16b74e8aea9e2fe043406f2ef74b1cb0759 github.com/golang/freetype/raster
bcfeb16b74e8aea9e2fe043406f2ef74b1cb0759 github.com/golang/freetype/truetype
426cfd8eeb6e08ab1932954e09e3c2cb2bc6e36d golang.org/x/image/draw
426cfd8eeb6e08ab1932954e09e3c2cb2bc6e36d golang.org/x/image/font
426cfd8eeb6e08ab1932954e09e3c2cb2bc6e36d golang.org/x/image/font/basicfont
426cfd8eeb6e08ab1932954e09e3c2cb2bc6e36d golang.org/x/image/math/f64
426cfd8eeb6e08ab1932954e09e3c2cb2bc6e36d golang.org/x/image/math/fixed

1
vendor/github.com/Xe/x/irc/amerge/.gitignore generated vendored Normal file

@ -0,0 +1 @@
amerge

4
vendor/github.com/Xe/x/irc/amerge/README.md generated vendored Normal file

@ -0,0 +1,4 @@
amerge
======
Utility for scraping and (later) merging atheme databases.

591
vendor/github.com/Xe/x/irc/amerge/database.go generated vendored Normal file

@ -0,0 +1,591 @@
package main
import (
"bufio"
"errors"
"fmt"
"log"
"os"
"strconv"
"strings"
)
var (
NoSuchAcctErr = errors.New("There is no such account by that name")
NoSuchChanErr = errors.New("There is no such channel by that name")
NoSuchGroupErr = errors.New("There is no such group by that name")
)
type Database struct {
Version string
ModuleDeps []*Line
LastUID string
LastKID int
LastXID int
LastQID int
Accounts map[string]*Account
Channels map[string]*Channel
Bots map[string]*Bot
Groups map[string]*Group
Names map[string]*Name
Badwords []Badword
Klines []Kline
ConnectInfos []ConnectInfo
HostOffers []HostOffer
HostRequests []HostRequest
ClonesExemptions []ClonesExemption
Rwatches []Line
lines []*Line
file *bufio.Scanner
}
func NewDatabase(fname string) (db *Database, err error) {
fin, err := os.Open(fname)
if err != nil {
return
}
db = &Database{
Accounts: make(map[string]*Account),
Channels: make(map[string]*Channel),
Bots: make(map[string]*Bot),
Groups: make(map[string]*Group),
Names: make(map[string]*Name),
}
db.file = bufio.NewScanner(fin)
for db.file.Scan() {
rawline := db.file.Text()
l := &Line{}
split := strings.Split(rawline, " ")
l.Verb = split[0]
l.Args = split[1:]
db.lines = append(db.lines, l)
switch l.Verb {
case "DBV": // Database version
db.Version = l.Args[0]
case "MDEP": // Module dependency
db.ModuleDeps = append(db.ModuleDeps, l)
case "LUID": // Last used UID for accounts
db.LastUID = l.Args[0]
case "MU": // Create a user account
a := &Account{
Name: l.Args[1],
UID: l.Args[0],
Email: l.Args[3],
Password: l.Args[2],
Regtime: l.Args[4],
LastSeenTime: l.Args[5],
Metadata: make(map[string]string),
}
db.Accounts[strings.ToUpper(a.Name)] = a
case "MDU": // User metadata
account, err := db.GetAccount(l.Args[0])
if err != nil {
log.Panicf("Tried to read account %s but got %#v???", l.Args[0], err)
}
account.Metadata[l.Args[1]] = strings.Join(l.Args[2:], " ")
case "AC": // Account access rule (prevents nickserv protections for a mask)
account, err := db.GetAccount(l.Args[0])
if err != nil {
log.Panicf("Tried to read account %s but got %#v???", l.Args[0], err)
}
ac := Access{
AccountName: l.Args[0],
Mask: l.Args[1],
}
account.AccessList = append(account.AccessList, ac)
case "MI": // MemoServ IGNORE for a user
account, err := db.GetAccount(l.Args[0])
if err != nil {
log.Panicf("Tried to read account %s but got %#v???", l.Args[0], err)
}
account.Ignores = append(account.Ignores, l.Args[1])
case "MN": // Account nickname in nick group
account, err := db.GetAccount(l.Args[0])
if err != nil {
log.Panicf("Tried to read account %s but got %#v???", l.Args[0], err)
}
gn := GroupedNick{
Account: l.Args[0],
Name: l.Args[1],
Regtime: l.Args[2],
Seentime: l.Args[3],
}
account.Nicks = append(account.Nicks, gn)
case "MCFP": // Certificate Fingerprint
account, err := db.GetAccount(l.Args[0])
if err != nil {
log.Panicf("Tried to read account %s but got %#v???", l.Args[0], err)
}
account.CertFP = append(account.CertFP, l.Args[1])
case "ME": // Memo in user's inbox
account, err := db.GetAccount(l.Args[0])
if err != nil {
log.Panicf("Tried to read account %s but got %#v???", l.Args[0], err)
}
m := Memo{
Inbox: l.Args[0],
From: l.Args[1],
Time: l.Args[2],
Read: l.Args[3] == "1",
Contents: strings.Join(l.Args[4:], " "),
}
account.Memos = append(account.Memos, m)
case "MC": // Create a channel
mlockon, err := strconv.ParseInt(l.Args[4], 16, 0)
if err != nil {
panic(err)
}
c := &Channel{
Name: l.Args[0],
Regtime: l.Args[1],
Seentime: l.Args[2],
Flags: l.Args[3],
MlockOn: int(mlockon),
Metadata: make(map[string]string),
AccessMetadata: make(map[string]AccessMetadata),
}
db.Channels[strings.ToUpper(l.Args[0])] = c
case "CA": // ChanAcs
c, err := db.GetChannel(l.Args[0])
if err != nil {
log.Panicf("Tried to read channel %s but got %#v???", l.Args[0], err)
}
ca := ChanAc{
Channel: l.Args[0],
Account: l.Args[1],
FlagSet: l.Args[2],
DateGranted: l.Args[3],
WhoGranted: l.Args[4],
}
c.Access = append(c.Access, ca)
case "MDC": // Channel metadata
c, err := db.GetChannel(l.Args[0])
if err != nil {
log.Panicf("Tried to read channel %s but got %#v???", l.Args[0], err)
}
c.Metadata[l.Args[1]] = strings.Join(l.Args[2:], " ")
case "MDA": // Channel-based entity key->value
c, err := db.GetChannel(l.Args[0])
if err != nil {
log.Panicf("Tried to read channel %s but got %#v???", l.Args[0], err)
}
amd := AccessMetadata{
ChannelName: l.Args[0],
Entity: l.Args[1],
Key: l.Args[2],
Value: l.Args[3],
}
c.AccessMetadata[strings.ToUpper(amd.Key)] = amd
case "NAM":
nam := &Name{
Name: l.Args[0],
Metadata: make(map[string]string),
}
db.Names[strings.ToUpper(nam.Name)] = nam
case "MDN":
nam, ok := db.Names[strings.ToUpper(l.Args[0])]
if !ok {
panic("Atheme is broken with things")
}
nam.Metadata[l.Args[1]] = strings.Join(l.Args[2:], " ")
case "KID": // Biggest kline id used
kid, err := strconv.ParseInt(l.Args[0], 10, 0)
if err != nil {
panic("atheme is broken with KID " + l.Args[0])
}
db.LastKID = int(kid)
case "XID": // Biggest xline id used
xid, err := strconv.ParseInt(l.Args[0], 10, 0)
if err != nil {
panic("atheme is broken with XID " + l.Args[0])
}
db.LastXID = int(xid)
case "QID": // Biggest qline id used
qid, err := strconv.ParseInt(l.Args[0], 10, 0)
if err != nil {
panic("atheme is broken with QID " + l.Args[0])
}
db.LastQID = int(qid)
case "KL": // kline
id, err := strconv.ParseInt(l.Args[0], 10, 0)
if err != nil {
panic(err)
}
kl := Kline{
ID: int(id),
User: l.Args[1],
Host: l.Args[2],
Duration: l.Args[3],
DateSet: l.Args[4],
WhoSet: l.Args[5],
Reason: strings.Join(l.Args[6:], " "),
}
db.Klines = append(db.Klines, kl)
case "BOT": // BotServ bot
bot := &Bot{
Nick: l.Args[0],
User: l.Args[1],
Host: l.Args[2],
IsPrivate: l.Args[3] == "1",
CreationDate: l.Args[4],
Gecos: l.Args[5],
}
db.Bots[strings.ToUpper(bot.Nick)] = bot
case "BW": // BADWORDS entry
bw := Badword{
Mask: l.Args[0],
TimeSet: l.Args[1],
Setter: l.Args[2],
}
if len(l.Args) == 5 {
bw.Channel = l.Args[3]
bw.Action = l.Args[4]
} else {
bw.Setter = bw.Setter + " " + l.Args[3]
bw.Channel = l.Args[4]
bw.Action = l.Args[5]
}
db.Badwords = append(db.Badwords, bw) // TODO: move this to Channel?
case "GRP": // Group
g := &Group{
UID: l.Args[0],
Name: l.Args[1],
CreationDate: l.Args[2],
Flags: l.Args[3],
Metadata: make(map[string]string),
}
db.Groups[strings.ToUpper(l.Args[1])] = g
case "GACL": // Group access list
g, err := db.GetGroup(l.Args[0])
if err != nil {
log.Panicf("Tried to read group %s but got %#v???", l.Args[0], err)
}
gacl := GroupACL{
GroupName: l.Args[0],
AccountName: l.Args[1],
Flags: l.Args[2],
}
g.ACL = append(g.ACL, gacl)
case "MDG": // Group Metadata
g, err := db.GetGroup(l.Args[0])
if err != nil {
log.Panicf("Tried to read group %s but got %#v???", l.Args[0], err)
}
g.Metadata[l.Args[1]] = strings.Join(l.Args[2:], " ")
case "CLONES-EX": // CLONES exemptions
ce := ClonesExemption{
IP: l.Args[0],
Min: l.Args[1],
Max: l.Args[2],
Expiry: l.Args[3],
Reason: strings.Join(l.Args[4:], " "),
}
db.ClonesExemptions = append(db.ClonesExemptions, ce)
case "LI": // InfoServ INFO posts
ci := ConnectInfo{
Creator: l.Args[0],
Topic: l.Args[1],
CreationDate: l.Args[2],
Body: strings.Join(l.Args[3:], " "),
}
db.ConnectInfos = append(db.ConnectInfos, ci)
case "HO": // Vhost offer
var ho HostOffer
if len(l.Args) == 3 {
ho = HostOffer{
Vhost: l.Args[0],
CreationDate: l.Args[1],
Creator: l.Args[2],
}
} else {
ho = HostOffer{
Group: l.Args[0],
Vhost: l.Args[1],
CreationDate: l.Args[2],
Creator: l.Args[3],
}
}
db.HostOffers = append(db.HostOffers, ho)
case "HR": // Vhost request
hr := HostRequest{
Account: l.Args[0],
Vhost: l.Args[1],
RequestTime: l.Args[2],
}
db.HostRequests = append(db.HostRequests, hr)
// Verbs to ignore
case "":
default:
fmt.Printf("%#v\n", l)
}
}
return
}
func (db *Database) GetAccount(name string) (*Account, error) {
account, ok := db.Accounts[strings.ToUpper(name)]
if !ok {
return nil, NoSuchAcctErr
}
return account, nil
}
func (db *Database) GetChannel(name string) (*Channel, error) {
channel, ok := db.Channels[strings.ToUpper(name)]
if !ok {
return nil, NoSuchChanErr
}
return channel, nil
}
func (db *Database) GetGroup(name string) (*Group, error) {
group, ok := db.Groups[strings.ToUpper(name)]
if !ok {
return nil, NoSuchGroupErr
}
return group, nil
}
func (db *Database) GetBot(name string) (*Bot, error) {
group, ok := db.Bots[strings.ToUpper(name)]
if !ok {
return nil, NoSuchGroupErr
}
return group, nil
}
type Line struct {
Verb string
Args []string
}
type Account struct {
Name string
Email string
Flags string
Kind string
UID string
Password string
Regtime string
LastSeenTime string
Metadata map[string]string
Nicks []GroupedNick
Memos []Memo
CertFP []string
AccessList []Access
Ignores []string
}
type Access struct {
AccountName string
Mask string
}
type Name struct {
Name string
Metadata map[string]string
}
type GroupedNick struct {
Account string
Name string
Regtime string
Seentime string
}
type Memo struct {
Inbox string
From string // an account name
Time string
Read bool
Contents string
}
type Channel struct {
Name string
Regtime string
Seentime string
Flags string
MlockOn int
MlockOff int
MlockLimit int
MlockKey string
Access []ChanAc
Metadata map[string]string
AccessMetadata map[string]AccessMetadata
}
type AccessMetadata struct {
ChannelName string
Entity string
Key string
Value string
}
type ChanAc struct {
Channel string
Account string
FlagSet string
DateGranted string
WhoGranted string
}
type Kline struct {
ID int
User string
Host string
Duration string
DateSet string
WhoSet string
Reason string
}
type Bot struct {
Nick string
User string
Host string
IsPrivate bool
CreationDate string
Gecos string
}
type Badword struct {
Mask string
TimeSet string
Setter string // can be Foo or Foo (Bar)
Channel string
Action string
}
type Group struct {
UID string
Name string
CreationDate string
Flags string
ACL []GroupACL
Metadata map[string]string
}
type GroupACL struct {
GroupName string
AccountName string
Flags string
}
type ConnectInfo struct {
Creator string
Topic string
CreationDate string
Body string
}
type HostOffer struct { // if there are only 3 args, there is no group
Group string
Vhost string
CreationDate string
Creator string
}
type HostRequest struct {
Account string
Vhost string
RequestTime string
}
type ClonesExemption struct {
IP string
Min string
Max string
Expiry string
Reason string
}

81
vendor/github.com/Xe/x/irc/amerge/main.go generated vendored Normal file

@ -0,0 +1,81 @@
package main
import (
"flag"
"log"
"strings"
)
var (
leftFname = flag.String("left-db", "./left.db", "database to read from to compare as the left hand")
rightFname = flag.String("right-db", "./right.db", "\" for the right hand side")
)
func main() {
flag.Parse()
leftDB, err := NewDatabase(*leftFname)
if err != nil {
panic(err)
}
_ = leftDB
rightDB, err := NewDatabase(*rightFname)
if err != nil {
panic(err)
}
_ = rightDB
result := &Database{
Accounts: make(map[string]*Account),
Channels: make(map[string]*Channel),
Bots: make(map[string]*Bot),
Groups: make(map[string]*Group),
Names: make(map[string]*Name),
}
_ = result
// Compare accounts and grouped nicks in left database to names in right database
// this is O(scary)
for leftAccountName, acc := range leftDB.Accounts {
for _, leftGroupedNick := range acc.Nicks {
conflictAcc, err := rightDB.GetAccount(leftGroupedNick.Name)
if err != nil {
goto botcheck
}
if conflictAcc.Email == acc.Email {
//log.Printf("Can ignore %s, they are the same user by email account", acc.Name)
goto botcheck
}
log.Printf(
"While trying to see if %s:%s is present in right database, found a conflict with %s",
acc.Name, leftGroupedNick.Name, conflictAcc.Name,
)
log.Printf(
"left: %s %s %s %s",
acc.Name, acc.Email, acc.Regtime, acc.LastSeenTime,
)
log.Printf(
"right: %s %s %s %s",
conflictAcc.Name, conflictAcc.Email, conflictAcc.Regtime, conflictAcc.LastSeenTime,
)
botcheck:
//log.Printf("Checking for bot collisions for %s:%s...", acc.Name, leftGroupedNick.Name)
conflictBot, err := rightDB.GetBot(leftGroupedNick.Name)
if err != nil {
goto next
}
if strings.ToUpper(conflictBot.Nick) == leftAccountName {
log.Printf("Nickname %s conflicts with right's bot %s", leftGroupedNick.Name, conflictBot.Nick)
}
next:
continue
}
}
}

1
vendor/github.com/Xe/x/irc/bncadmin/.gitignore generated vendored Normal file

@ -0,0 +1 @@
.env

13
vendor/github.com/Xe/x/irc/bncadmin/box.rb generated vendored Normal file

@ -0,0 +1,13 @@
from "xena/go"
workdir "/"
copy "main.go", "/go/src/github.com/Xe/tools/irc/bncadmin/main.go"
copy "vendor", "/go/src/github.com/Xe/tools/irc/bncadmin/"
run "go install github.com/Xe/tools/irc/bncadmin && cp /go/bin/bncadmin /usr/bin/bncadmin"
run "rm -rf /usr/local/go /go && apk del bash gcc musl-dev openssl go"
flatten
cmd "/usr/bin/bncadmin"
tag "xena/bncadmin"

191
vendor/github.com/Xe/x/irc/bncadmin/main.go generated vendored Normal file

@ -0,0 +1,191 @@
package main
import (
"crypto/tls"
"fmt"
"log"
"os"
"strings"
"sync"
"time"
"github.com/belak/irc"
_ "github.com/joho/godotenv/autoload"
)
var (
bncUsername = needEnv("BNC_USERNAME")
bncPassword = needEnv("BNC_PASSWORD")
bncServer = needEnv("BNC_SERVER")
serverSuffixExpected = needEnv("SERVER_SUFFIX")
)
func needEnv(key string) string {
v := os.Getenv(key)
if v == "" {
log.Fatal("need value for " + key)
}
return v
}
func main() {
log.Println("Bot connecting to " + bncServer)
conn, err := tls.Dial("tcp", bncServer, &tls.Config{
InsecureSkipVerify: true,
})
if err != nil {
log.Fatal(err)
}
defer conn.Close()
c := irc.NewClient(conn, irc.ClientConfig{
Nick: "admin",
Pass: fmt.Sprintf("%s:%s", bncUsername, bncPassword),
User: "BNCbot",
Name: "BNC admin bot",
Handler: NewBot(),
})
for _, cap := range []string{"userhost-in-names", "multi-prefix", "znc.in/server-time-iso"} {
c.Writef("CAP REQ %s", cap)
}
err = c.Run()
if err != nil {
main()
}
}
type Bot struct {
setupDaemon sync.Once
lookingForUserNetworks bool
// i am sorry
launUsername string
}
func NewBot() *Bot {
return &Bot{}
}
func (b *Bot) Handle(c *irc.Client, m *irc.Message) {
b.setupDaemon.Do(func() {
go func() {
for {
b.lookingForUserNetworks = true
c.Writef("PRIVMSG *status ListAllUserNetworks")
time.Sleep(2 * time.Second) // always sleep 2
b.lookingForUserNetworks = false
time.Sleep(1 * time.Hour)
}
}()
})
// log.Printf("in >> %s", m)
switch m.Command {
case "PRIVMSG":
if m.Prefix.Name == "*status" {
b.HandleStarStatus(c, m)
}
if strings.HasPrefix(m.Prefix.Name, "?") {
b.HandlePartyLineCommand(c, m)
}
if m.Params[0] == "#bnc" {
b.HandleCommand(c, m)
}
case "NOTICE":
if m.Prefix.Name == "*status" {
f := strings.Fields(m.Trailing())
if f[0] == "***" {
log.Println(m.Trailing())
// look up geoip and log here
}
}
}
}
func (b *Bot) HandleStarStatus(c *irc.Client, m *irc.Message) {
if b.lookingForUserNetworks {
if strings.HasPrefix(m.Trailing(), "| ") {
f := strings.Fields(m.Trailing())
switch len(f) {
case 11: // user name line
// 11: []string{"|", "AzureDiamond", "|", "N/A", "|", "0", "|", "|", "|", "|", "|"}
username := f[1]
b.launUsername = username
case 15: // server and nick!user@host line
// 15: []string{"|", "`-", "|", "PonyChat", "|", "0", "|", "Yes", "|", "amethyststar.ponychat.net", "|", "test!test@lypbmzxixk.ponychat.net", "|", "1", "|"}
server := f[9]
network := f[3]
if !strings.HasSuffix(server, serverSuffixExpected) {
log.Printf("%s is using the BNC to connect to unknown server %s, removing permissions", b.launUsername, server)
b.RemoveNetwork(c, b.launUsername, network)
c.Writef("PRIVMSG ?%s :You have violated the terms of the BNC service and your account has been disabled. Please contact PonyChat staff to appeal this.", b.launUsername)
c.Writef("PRIVMSG *blockuser block %s", b.launUsername)
}
}
}
}
}
func (b *Bot) HandlePartyLineCommand(c *irc.Client, m *irc.Message) {
split := strings.Fields(m.Trailing())
username := m.Prefix.Name[1:]
if len(split) == 0 {
return
}
switch strings.ToLower(split[0]) {
case "help":
c.Writef("PRIVMSG ?%s :Commands available:", username)
c.Writef("PRIVMSG ?%s :- ChangeName <new desired \"real name\">", username)
c.Writef("PRIVMSG ?%s : Changes your IRC \"real name\" to a new value instead of the default", username)
c.Writef("PRIVMSG ?%s :- Reconnect", username)
c.Writef("PRIVMSG ?%s : Disconnects from PonyChat and connects to PonyChat again", username)
c.Writef("PRIVMSG ?%s :- Help", username)
c.Writef("PRIVMSG ?%s : Shows this Message", username)
case "changename":
if len(split) < 2 {
c.Writef("NOTICE %s :Usage: ChangeName <new desired \"real name\">", username)
return
}
gecos := strings.Join(split[1:], " ")
c.Writef("PRIVMSG *controlpanel :Set RealName %s %s", username, gecos)
c.Writef("PRIVMSG ?%s :Please reply %q to confirm changing your \"real name\" to: %s", username, "Reconnect", gecos)
case "reconnect":
c.Writef("PRIVMSG ?%s :Reconnecting...", username)
c.Writef("PRIVMSG *controlpanel Reconnect %s PonyChat", username)
}
}
func (b *Bot) HandleCommand(c *irc.Client, m *irc.Message) {
split := strings.Fields(m.Trailing())
if split[0][0] == ';' {
switch strings.ToLower(split[0][1:]) {
case "request":
c.Write("PRIVMSG #bnc :In order to request a BNC account, please connect to the bouncer server (bnc.ponychat.net, ssl port 6697, allow untrusted certs) with your nickserv username and passsword in the server password field (example: AzureDiamond:hunter2)")
case "help":
c.Write("PRIVMSG #bnc :PonyChat bouncer help is available here: https://ponychat.net/help/bnc/")
case "rules":
c.Write("PRIVMSG #bnc :Terms of the BNC")
c.Write("PRIVMSG #bnc :- Do not use the BNC to evade channel bans")
c.Write("PRIVMSG #bnc :- Do not use the BNC to violate any network rules")
c.Write("PRIVMSG #bnc :- Do not use the BNC to connect to any other IRC network than PonyChat")
}
}
}
func (b *Bot) RemoveNetwork(c *irc.Client, username, network string) {
c.Writef("PRIVMSG *controlpanel :DelNetwork %s %s", username, network)
}

11
vendor/github.com/Xe/x/irc/bncadmin/run.sh generated vendored Normal file

@ -0,0 +1,11 @@
#!/bin/bash
set -e
set -x
box box.rb
docker push xena/bncadmin
hyper rm -f bncadmin ||:
hyper pull xena/bncadmin
hyper run --name bncadmin --restart=always -dit --size s1 --env-file .env xena/bncadmin

3
vendor/github.com/Xe/x/irc/bncadmin/vendor-log generated vendored Normal file

@ -0,0 +1,3 @@
fd04337c94f98ab7c2ef34fbeb4e821284775095 github.com/belak/irc
4ed13390c0acd2ff4e371e64d8b97c8954138243 github.com/joho/godotenv
4ed13390c0acd2ff4e371e64d8b97c8954138243 github.com/joho/godotenv/autoload

1
vendor/github.com/Xe/x/irc/clevelandbrown/.gitignore generated vendored Normal file

@ -0,0 +1 @@
.env

301
vendor/github.com/Xe/x/irc/clevelandbrown/main.go generated vendored Normal file

@ -0,0 +1,301 @@
package main
import (
"crypto/tls"
"log"
"os"
"strings"
"sync"
"time"
"github.com/Xe/ln"
_ "github.com/joho/godotenv/autoload"
irc "gopkg.in/irc.v1"
)
var (
addr = os.Getenv("SERVER")
password = os.Getenv("PASSWORD")
sclock sync.Mutex
scores map[string]float64
)
func main() {
scores = map[string]float64{}
conn, err := tls.Dial("tcp", addr, &tls.Config{
InsecureSkipVerify: true,
})
if err != nil {
log.Fatal(err)
}
ln.Log(ln.F{
"action": "connected",
"where": addr,
})
cli := irc.NewClient(conn, irc.ClientConfig{
Handler: irc.HandlerFunc(scoreCleveland),
Nick: "Xena",
User: "xena",
Name: "cleveland brown termination bot",
Pass: password,
})
ff := ln.FilterFunc(func(e ln.Event) bool {
if val, ok := e.Data["svclog"]; ok && val.(bool) {
delete(e.Data, "svclog")
line, err := ln.DefaultFormatter.Format(e)
if err != nil {
ln.Fatal(ln.F{"err": err})
}
err = cli.Writef("PRIVMSG #services :%s", string(line))
if err != nil {
log.Fatal(err)
}
}
return true
})
ln.DefaultLogger.Filters = append(ln.DefaultLogger.Filters, ff)
go func() {
for {
time.Sleep(30 * time.Second)
sclock.Lock()
changed := 0
ignored := 0
for key, sc := range scores {
if sc >= notifyThreshold {
ignored++
continue
}
scores[key] = sc / 100
changed++
}
sclock.Unlock()
ln.Log(ln.F{
"action": "nerfed_scores",
"changed": changed,
"ignored": ignored,
})
}
}()
go func() {
for {
time.Sleep(5 * time.Minute)
sclock.Lock()
nsc := map[string]float64{}
halved := 0
rem := 0
for key, score := range scores {
if score > 0.01 {
if score > 3 {
score = score / 2
halved++
}
nsc[key] = score
} else {
rem++
}
}
scores = nsc
ln.Log(ln.F{
"action": "reaped_scores",
"removed": rem,
"halved": halved,
"svclog": true,
})
sclock.Unlock()
}
}()
ln.Log(ln.F{
"action": "accepting_input",
"svclog": true,
})
cli.Run()
}
const (
notifyThreshold = 3
autobanThreshold = 10
)
func scoreCleveland(c *irc.Client, m *irc.Message) {
if m.Trailing() == "!ohshitkillit" && m.Prefix.Host == "ponychat.net" {
ln.Fatal(ln.F{
"action": "emergency_stop",
"user": m.Prefix.String(),
"channel": m.Params[0],
"svclog": true,
})
}
sclock.Lock()
defer sclock.Unlock()
if m.Command != "PRIVMSG" {
return
}
/*
if !strings.HasSuffix(m.Params[0], monitorChan) {
return
}
*/
switch m.Params[0] {
case "#services", "#/dev/syslog":
return
}
switch m.Prefix.Name {
case "Taz", "cadance-syslog", "FromDiscord", "Sonata_Dusk", "CQ_Discord", "Onion":
return
case "Sparkler":
// (Sparkler) lol
// (Sparkler) don't banzor me :(
return
case "Aeyris":
// known shitposter, collision risk :(
return
case "Ryunosuke", "WaterStar":
return
}
sc, ok := scores[m.Prefix.Host]
if !ok {
sc = 0
}
for _, line := range lines {
if strings.Contains(strings.ToLower(m.Trailing()), line) {
sc += 1
ln.Log(ln.F{
"action": "siren_compare",
"channel": m.Params[0],
"user": m.Prefix.String(),
"scoredelta": 1,
"svclog": true,
})
}
}
thisLine := strings.ToLower(m.Trailing())
for _, efnLine := range efknockr {
if strings.Contains(thisLine, strings.ToLower(efnLine)) {
sc += 3
ln.Log(ln.F{
"action": "efknockr_detected",
"score": sc,
"user": m.Prefix.String(),
"channel": m.Params[0],
"delta": 3,
"svclog": true,
})
}
}
scores[m.Prefix.Host] = sc
if sc >= notifyThreshold {
ln.Log(ln.F{
"action": "warn",
"channel": m.Params[0],
"user": m.Prefix.String(),
"score": sc,
"svclog": true,
"ping": "Xena",
})
}
if sc >= autobanThreshold {
c.Writef("PRIVMSG OperServ :AKILL ADD %s spamming | Cleveland show spammer", m.Prefix.Name)
c.Writef("PRIVMSG %s :Sorry for that, he's gone now.", m.Params[0])
ln.Log(ln.F{
"action": "kline_added",
"channel": m.Params[0],
"user": m.Prefix.String(),
"score": sc,
"svclog": true,
})
scores[m.Prefix.Host] = 0
}
}
const showLyrics = `my name is cleveland brown and I am proud to be
right back in my hometown with my new family.
there's old friends and new friends and even a bear.
through good times and bad times it's true love we share.
and so I found a place
where everyone will know
my happy mustache face
this is the cleveland show! haha!`
var lines = []string{
"my name is cleveland brown and I am proud to be",
"my name is cl3v3land brown and i am proud to be",
"right back in my hometown with my new family",
"right back in my hometown with my n3w family",
"there's old friends and new friends and even a bear",
"through good times and bad times it's true love we share.",
"and so I found a place",
"where everyone will know",
"my happy mustache face",
"this is the cleveland show! haha!",
}
var efknockr = []string{
"THIS NETWORK IS FUCKING BLOWJOBS LOL COME TO WORMSEC FOR SOME ICE COLD CHATS",
"0 DAY BANANA BOMBS \"OK\"",
"IRC.WORMSEC.US",
"THE HOTTEST MOST EXCLUSIVE SEC ON THE NET",
"THIS NETWORK IS BLOWJOBS! GET ON SUPERNETS FOR COLD HARD CHATS NOW",
"IRC.SUPERNETS.ORG | PORT 6667/6697 (SSL) | #SUPERBOWL | IPV6 READY",
"▓█▓▓▓▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▄░ ░▄▓▓▓▓▓▓▓▓▓█▓▓▓ IRC.WORMSEC.US | #SUPERBOWL",
"THIS NETWORK IS BLOWJOBS! GET ON SUPERNETS FOR COLD HARD CHATS NOW",
"▄",
"███▓▓▒▒▒▒▒▒▒░░░ ░░░░▒▒▒▓▓▓▓",
"▓█▓▓▓▒▒▒▒▒▒▓▓▓▓▓▓▓▓▓▓▄░ ░▄▓▓▓▓▓▓▓▓▓█▓▓▓",
"▒▓▓▓▓▒▒░░▒█▓▓▓▓▓▓▓▓▓▓█░▒░░▒▓▓▓▓▓▓▓▓▓▓▓█▓▓",
"░▒▓▓▒▒▒▒░░▒▒█▓▓▓▓▓▓▓▓▓█░▒░░░▒▓▓▓▓▓▓▓▓▓▓█▒▓░",
"▒▒▒▒▒▒▒▒▒▒▒░░▀▀▀▀▀▀▀ ░▒░░ ░▒▒▒▀▀▀▀▀▀▒▓▓▓▒",
"THE HOTTEST MOST EXCLUSIVE SEC ON THE NET",
"â–‘â–’â–“â–“â–’â–’â–’â–’â–‘â–‘â–’",
"Techman likes to fuck kids in the ass!!",
"https://discord.gg/3b86TH7",
"| |\\\\",
"/ \\ ||",
"( ,( )=m=D~~~ LOL DONGS",
"/ / | |",
}

1
vendor/github.com/Xe/x/irc/kcpd/.gitignore generated vendored Normal file

@ -0,0 +1 @@
cfg

4
vendor/github.com/Xe/x/irc/kcpd/README.md generated vendored Normal file

@ -0,0 +1,4 @@
kcpd
====
A simple relay for multiplexing IRC sessions. Useful for bouncers or proxies.

93
vendor/github.com/Xe/x/irc/kcpd/client.go generated vendored Normal file
View File

@ -0,0 +1,93 @@
package main
import (
"crypto/tls"
"errors"
"io"
"net"
kcp "github.com/xtaci/kcp-go"
"github.com/xtaci/smux"
)
// Client opens a TCP listener and forwards traffic to the remote server over KCP.
type Client struct {
cfg *Config
listener net.Listener
}
// ErrBadConfig means the configuration is not correctly defined.
var ErrBadConfig = errors.New("kcpd: bad configuration file")
// NewClient constructs a new client with a given config.
func NewClient(cfg *Config) (*Client, error) {
if cfg.Mode != "client" {
return nil, ErrBadConfig
}
if cfg.ClientServerAddress == "" && cfg.ClientUsername == "" && cfg.ClientPassword == "" && cfg.ClientBindaddr == "" {
return nil, ErrBadConfig
}
return &Client{cfg: cfg}, nil
}
// Dial blockingly connects to the remote server and relays TCP traffic.
func (c *Client) Dial() error {
conn, err := kcp.Dial(c.cfg.ClientServerAddress)
if err != nil {
return err
}
defer conn.Close()
tlsConn := tls.Client(conn, &tls.Config{
InsecureSkipVerify: true, // XXX hack please remove
})
defer tlsConn.Close()
session, err := smux.Client(tlsConn, smux.DefaultConfig())
if err != nil {
return err
}
defer session.Close()
l, err := net.Listen("tcp", c.cfg.ClientBindaddr)
if err != nil {
return err
}
defer l.Close()
c.listener = l
for {
cconn, err := l.Accept()
if err != nil {
break
}
cstream, err := session.OpenStream()
if err != nil {
break
}
go copyConn(cconn, cstream)
}
return nil
}
// Close frees resources acquired in the client.
func (c *Client) Close() error {
return c.listener.Close()
}
// copyConn copies one connection to another bidirectionally.
func copyConn(left, right net.Conn) error {
defer left.Close()
defer right.Close()
go io.Copy(left, right)
io.Copy(right, left)
return nil
}

9
vendor/github.com/Xe/x/irc/kcpd/gopreload.go generated vendored Normal file

@ -0,0 +1,9 @@
// gopreload.go
package main
/*
This file is separate to make it very easy to both add into an application, but
also very easy to remove.
*/
import _ "github.com/Xe/gopreload"

13
vendor/github.com/Xe/x/irc/kcpd/gops.go generated vendored Normal file

@ -0,0 +1,13 @@
package main
import (
"log"
"github.com/google/gops/agent"
)
func init() {
if err := agent.Listen(nil); err != nil {
log.Fatal(err)
}
}

110
vendor/github.com/Xe/x/irc/kcpd/main.go generated vendored Normal file

@ -0,0 +1,110 @@
package main
import (
"flag"
"io/ioutil"
"log"
"os"
"time"
"github.com/caarlos0/env"
_ "github.com/joho/godotenv/autoload"
yaml "gopkg.in/yaml.v1"
)
// Config is the configuration for kcpd
type Config struct {
Mode string `env:"KCPD_MODE,required" envDefault:"server" yaml:"mode"`
// Client mode config
// What IP the client should connect to
ClientServerAddress string `env:"KCPD_SERVER_ADDRESS" yaml:"server"`
// Administrator's NickServ username
ClientUsername string `env:"KCPD_ADMIN_USERNAME" yaml:"admin_username"`
// Administrator's NickServ password
ClientPassword string `env:"KCPD_ADMIN_PASSWORD" yaml:"admin_password"`
// Local bindaddr
ClientBindaddr string `env:"KCPD_CLIENT_BINDADDR" yaml:"client_bind"`
// Server mode config
// What UDP port/address should kcpd bind on?
ServerBindAddr string `env:"KCPD_BIND" yaml:"bind"`
// Atheme URL for Nickserv authentication of the administrator for setting up KCP sessions
ServerAthemeURL string `env:"KCPD_ATHEME_URL" yaml:"atheme_url"`
// URL endpoint for allowing/denying users
ServerAllowListEndpoint string `env:"KCPD_ALLOWLIST_ENDPOINT" yaml:"allow_list_endpoint"`
// local ircd (unsecure) endpoint
ServerLocalIRCd string `env:"KCPD_LOCAL_IRCD" yaml:"local_ircd"`
// WEBIRC password to use for local sockets
ServerWEBIRCPassword string `env:"KCPD_WEBIRC_PASSWORD" yaml:"webirc_password"`
// ServerTLSCert is the TLS cert file
ServerTLSCert string `env:"KCPD_TLS_CERT" yaml:"tls_cert"`
// ServerTLSKey is the TLS key file
ServerTLSKey string `env:"KCPD_TLS_KEY" yaml:"tls_key"`
}
var (
configFname = flag.String("config", "", "configuration file to use (if unset config will be pulled from the environment)")
)
func main() {
flag.Parse()
cfg := &Config{}
if *configFname != "" {
fin, err := os.Open(*configFname)
if err != nil {
log.Fatal(err)
}
defer fin.Close()
data, err := ioutil.ReadAll(fin)
if err != nil {
log.Fatal(err)
}
err = yaml.Unmarshal(data, cfg)
if err != nil {
log.Fatal(err)
}
} else {
err := env.Parse(cfg)
if err != nil {
log.Fatal(err)
}
}
switch cfg.Mode {
case "client":
c, err := NewClient(cfg)
if err != nil {
log.Fatal(err)
}
for {
err = c.Dial()
if err != nil {
log.Println(err)
}
time.Sleep(time.Second)
}
case "server":
s, err := NewServer(cfg)
if err != nil {
log.Fatal(err)
}
err = s.ListenAndServe()
if err != nil {
log.Fatal(err)
}
default:
log.Fatal(ErrBadConfig)
}
}

97
vendor/github.com/Xe/x/irc/kcpd/server.go generated vendored Normal file

@ -0,0 +1,97 @@
package main
import (
"crypto/tls"
"fmt"
"log"
"net"
kcp "github.com/xtaci/kcp-go"
"github.com/xtaci/smux"
)
// Server represents the server side of kcpd. It listens on KCP and emits TCP connections from KCP streams.
type Server struct {
cfg *Config
}
// NewServer creates a new Server and validates config.
func NewServer(cfg *Config) (*Server, error) {
if cfg.Mode != "server" {
return nil, ErrBadConfig
}
if cfg.ServerBindAddr == "" && cfg.ServerAthemeURL == "" && cfg.ServerAllowListEndpoint == "" && cfg.ServerLocalIRCd == "" && cfg.ServerWEBIRCPassword == "" && cfg.ServerTLSCert == "" && cfg.ServerTLSKey == "" {
return nil, ErrBadConfig
}
return &Server{cfg: cfg}, nil
}
// ListenAndServe blockingly listens on the UDP port and relays KCP streams to TCP sockets.
func (s *Server) ListenAndServe() error {
l, err := kcp.Listen(s.cfg.ServerBindAddr)
if err != nil {
return err
}
defer l.Close()
log.Printf("listening on KCP: %v", l.Addr())
for {
conn, err := l.Accept()
if err != nil {
log.Println(err)
continue
}
go s.handleConn(conn)
}
}
func (s *Server) handleConn(conn net.Conn) error {
defer conn.Close()
log.Printf("new client: %v", conn.RemoteAddr())
cert, err := tls.LoadX509KeyPair(s.cfg.ServerTLSCert, s.cfg.ServerTLSKey)
if err != nil {
return err
}
tcfg := &tls.Config{
InsecureSkipVerify: true, // XXX hack remove
Certificates: []tls.Certificate{cert},
}
tlsConn := tls.Server(conn, tcfg)
defer tlsConn.Close()
session, err := smux.Server(tlsConn, smux.DefaultConfig())
if err != nil {
return err
}
defer session.Close()
for {
cstream, err := session.AcceptStream()
if err != nil {
log.Printf("client at %s error: %v", conn.RemoteAddr(), err)
return err
}
ircConn, err := net.Dial("tcp", s.cfg.ServerLocalIRCd)
if err != nil {
log.Printf("client at %s error: %v", conn.RemoteAddr(), err)
return err
}
host, _, _ := net.SplitHostPort(conn.RemoteAddr().String())
fmt.Fprintf(ircConn, "WEBIRC %s %s %s %s\r\n", s.cfg.ServerWEBIRCPassword, RandStringRunes(8), host, host)
go copyConn(cstream, ircConn)
}
return nil
}

81
vendor/github.com/Xe/x/irc/kcpd/stdlibsvc/README.md generated vendored Normal file

@ -0,0 +1,81 @@
# Your stdlib service: xena/kcpdwhitelist
This is the README for your service.
A few notes:
`package.json` is NPM-compatible and contains some stdlib configuration details.
`.gitignore` has also been provided for your convenience.
# package.json
This is a standard `package.json`. You'll notice an additional `"stdlib"` field.
You can configure your service for the stdlib registry using:
`name` - The name to register on stdlib, in the format of `<username>/<service>`.
In order to compile to the registry you must have permission to compile to the
provided username's account.
`defaultFunction` - Execute if provided no function route (root service).
If not specified, your base service route will provide a list of available
functions in JSON format.
`timeout` - The time in ms at which to kill service execution. Free accounts are
limited to 30 seconds (30000).
`publish` - Whether to publish releases (versioned) to the stdlib public
directory. Packages pushed to the registry in non-release environments will
never be published.
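Putting the fields above together, the `"stdlib"` portion of a `package.json` might look roughly like the following sketch. The values are illustrative, and the nesting under `"stdlib"` is inferred from the description above rather than copied from the real file:
```json
{
  "stdlib": {
    "name": "xena/kcpdwhitelist",
    "defaultFunction": "main",
    "timeout": 30000,
    "publish": false
  }
}
```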
# env.json
Environment configuration for your service. Each top level key (i.e.
`"dev"` and `"release"`) specifies their own set of key-value
pairs for a specific execution environment. The keys and values specified
are automatically added to the `process.env` variable in Node.js.
`"dev"` is the *non-configurable* name of the local environment, but can
also be used as an environment name for compilation
(i.e. `$ lib up development`).
`"release"` is the *non-configurable* name of the production environment when
you create releases with `$ lib release`.
You can add additional environments and key-value pairs, and use them for
compilation with `lib up <environment>`. Note that free accounts are
restricted to one compilation environment (aside from `"release"`).
*We recommend against checking this file in to version control*. It will be
saved with your tarball and is privately retrievable from the stdlib registry
using your account credentials. It has been added to `.gitignore` by default.
# f/main/function.json
This is your function definition file. The following fields can be used for
execution configuration of specific functions within your service.
`name` - The function name. This maps to an execution route over HTTP. For
example, `xena/kcpdwhitelist/main` would map to the first
function you've created.
`description` - A brief description of the function. To provide detailed
information about function execution, overwrite this README.
`args` - An `Array` describing each argument as you expect them to be passed to
`params.args`.
`kwargs` - An `Object` describing each keyword argument as you expect them to be
passed to `params.kwargs`
`http` - Information to provide to function requests over HTTP.
`http.headers` - HTTP headers to return in the response. Examples are
`"Content-Type"` to specify file type if your function returns a `Buffer` or
`"Access-Control-Allow-Origin"` to restrict browser-based function requests.
# f/main/index.js
The entry point to your function described in `f/main/function.json`.
This is *non-configurable*. You may add as many subdirectories and supportive
files as you like, but `index.js` will remain the entry point and *must*
export a function to be active.

8
vendor/github.com/Xe/x/irc/kcpd/stdlibsvc/env.json generated vendored Normal file

@ -0,0 +1,8 @@
{
"dev": {
"key": "value"
},
"release": {
"key": "value"
}
}


@ -0,0 +1,15 @@
{
"name": "main",
"description": "Function",
"args": [
"First argument",
"Second argument"
],
"kwargs": {
"alpha": "Keyword argument alpha",
"beta": "Keyword argument beta"
},
"http": {
"headers": {}
}
}


@ -0,0 +1,32 @@
/* Import dependencies, declare constants */
const win = (callback) => {
callback(null, "allowed");
};
const fail = (callback) => {
callback("not allowed");
};
/**
* Your function call
* @param {Object} params Execution parameters
* Members
* - {Array} args Arguments passed to function
* - {Object} kwargs Keyword arguments (key-value pairs) passed to function
* - {String} remoteAddress The IPv4 or IPv6 address of the caller
*
* @param {Function} callback Execute this to end the function call
* Arguments
* - {Error} error The error to show if function fails
* - {Any} returnValue JSON serializable (or Buffer) return value
*/
module.exports = (params, callback) => {
switch (params.kwargs.user) {
case "Xena":
win(callback);
default:
fail(callback);
}
};

Some files were not shown because too many files have changed in this diff.