From 2fb2ab8fa222afe36815f82e93430631044d2f67 Mon Sep 17 00:00:00 2001 From: Christine Dodrill Date: Sun, 22 Jan 2017 09:36:44 -0800 Subject: [PATCH] vendor dependencies --- lib/tunnel/client.go | 3 +- lib/tunnel/helper_test.go | 1 - lib/tunnel/server.go | 3 +- lib/tunnel/tcpproxy.go | 2 +- lib/tunnel/tunnel_test.go | 1 - lib/tunnel/util.go | 1 - vendor-log | 22 + .../github.com/GoRethink/gorethink/cluster.go | 522 +++++ .../gorethink/cluster_integration_test.go | 99 + .../GoRethink/gorethink/cluster_test.go | 63 + .../GoRethink/gorethink/connection.go | 381 ++++ .../gorethink/connection_handshake.go | 450 +++++ .../GoRethink/gorethink/connection_helper.go | 41 + .../github.com/GoRethink/gorethink/cursor.go | 710 +++++++ vendor/github.com/GoRethink/gorethink/doc.go | 6 + .../github.com/GoRethink/gorethink/errors.go | 182 ++ .../GoRethink/gorethink/gorethink.go | 58 + vendor/github.com/GoRethink/gorethink/host.go | 24 + vendor/github.com/GoRethink/gorethink/mock.go | 394 ++++ vendor/github.com/GoRethink/gorethink/node.go | 133 ++ vendor/github.com/GoRethink/gorethink/pool.go | 200 ++ .../GoRethink/gorethink/pseudotypes.go | 235 +++ .../github.com/GoRethink/gorethink/query.go | 455 +++++ .../GoRethink/gorethink/query_admin.go | 85 + .../GoRethink/gorethink/query_aggregation.go | 362 ++++ .../GoRethink/gorethink/query_control.go | 395 ++++ .../GoRethink/gorethink/query_db.go | 25 + .../GoRethink/gorethink/query_geospatial.go | 170 ++ .../GoRethink/gorethink/query_join.go | 47 + .../GoRethink/gorethink/query_manipulation.go | 121 ++ .../GoRethink/gorethink/query_math.go | 229 +++ .../GoRethink/gorethink/query_select.go | 141 ++ .../GoRethink/gorethink/query_string.go | 44 + .../GoRethink/gorethink/query_table.go | 173 ++ .../GoRethink/gorethink/query_time.go | 187 ++ .../gorethink/query_transformation.go | 193 ++ .../GoRethink/gorethink/query_write.go | 98 + .../github.com/GoRethink/gorethink/session.go | 328 ++++ .../github.com/GoRethink/gorethink/utils.go | 283 +++ 
vendor/github.com/Sirupsen/logrus/alt_exit.go | 64 + vendor/github.com/Sirupsen/logrus/doc.go | 26 + vendor/github.com/Sirupsen/logrus/entry.go | 275 +++ vendor/github.com/Sirupsen/logrus/exported.go | 193 ++ .../github.com/Sirupsen/logrus/formatter.go | 45 + vendor/github.com/Sirupsen/logrus/hooks.go | 34 + .../Sirupsen/logrus/json_formatter.go | 74 + vendor/github.com/Sirupsen/logrus/logger.go | 308 +++ vendor/github.com/Sirupsen/logrus/logrus.go | 143 ++ .../Sirupsen/logrus/terminal_appengine.go | 8 + .../Sirupsen/logrus/terminal_bsd.go | 10 + .../Sirupsen/logrus/terminal_linux.go | 14 + .../Sirupsen/logrus/terminal_notwindows.go | 22 + .../Sirupsen/logrus/terminal_solaris.go | 15 + .../Sirupsen/logrus/terminal_windows.go | 27 + .../Sirupsen/logrus/text_formatter.go | 170 ++ vendor/github.com/Sirupsen/logrus/writer.go | 53 + vendor/github.com/Xe/uuid/dce.go | 84 + vendor/github.com/Xe/uuid/doc.go | 8 + vendor/github.com/Xe/uuid/hash.go | 53 + vendor/github.com/Xe/uuid/node.go | 101 + vendor/github.com/Xe/uuid/time.go | 132 ++ vendor/github.com/Xe/uuid/util.go | 43 + vendor/github.com/Xe/uuid/uuid.go | 163 ++ vendor/github.com/Xe/uuid/version1.go | 41 + vendor/github.com/Xe/uuid/version4.go | 25 + .../Yawning/bulb/cmd_authenticate.go | 137 ++ vendor/github.com/Yawning/bulb/cmd_onion.go | 149 ++ .../Yawning/bulb/cmd_protocolinfo.go | 95 + vendor/github.com/Yawning/bulb/conn.go | 233 +++ vendor/github.com/Yawning/bulb/dialer.go | 54 + vendor/github.com/Yawning/bulb/listener.go | 87 + vendor/github.com/Yawning/bulb/response.go | 125 ++ vendor/github.com/Yawning/bulb/status.go | 71 + vendor/github.com/cenk/backoff/backoff.go | 66 + vendor/github.com/cenk/backoff/exponential.go | 156 ++ vendor/github.com/cenk/backoff/retry.go | 46 + vendor/github.com/cenk/backoff/ticker.go | 79 + vendor/github.com/cenkalti/backoff/backoff.go | 66 + .../cenkalti/backoff/exponential.go | 156 ++ vendor/github.com/cenkalti/backoff/retry.go | 46 + 
vendor/github.com/cenkalti/backoff/ticker.go | 79 + .../github.com/facebookgo/flagenv/flagenv.go | 67 + .../github.com/golang/protobuf/proto/clone.go | 229 +++ .../golang/protobuf/proto/decode.go | 970 ++++++++++ .../golang/protobuf/proto/encode.go | 1362 +++++++++++++ .../github.com/golang/protobuf/proto/equal.go | 300 +++ .../golang/protobuf/proto/extensions.go | 587 ++++++ .../github.com/golang/protobuf/proto/lib.go | 898 +++++++++ .../golang/protobuf/proto/message_set.go | 311 +++ .../golang/protobuf/proto/pointer_reflect.go | 484 +++++ .../golang/protobuf/proto/pointer_unsafe.go | 270 +++ .../golang/protobuf/proto/properties.go | 872 +++++++++ .../github.com/golang/protobuf/proto/text.go | 854 +++++++++ .../golang/protobuf/proto/text_parser.go | 895 +++++++++ .../hailocab/go-hostpool/epsilon_greedy.go | 220 +++ .../go-hostpool/epsilon_value_calculators.go | 40 + .../hailocab/go-hostpool/host_entry.go | 62 + .../hailocab/go-hostpool/hostpool.go | 243 +++ vendor/github.com/hashicorp/yamux/addr.go | 60 + vendor/github.com/hashicorp/yamux/const.go | 157 ++ vendor/github.com/hashicorp/yamux/mux.go | 87 + vendor/github.com/hashicorp/yamux/session.go | 623 ++++++ vendor/github.com/hashicorp/yamux/stream.go | 457 +++++ vendor/github.com/hashicorp/yamux/util.go | 28 + .../joho/godotenv/autoload/autoload.go | 15 + vendor/github.com/joho/godotenv/godotenv.go | 229 +++ vendor/github.com/koding/logging/context.go | 86 + vendor/github.com/koding/logging/custom.go | 40 + vendor/github.com/koding/logging/logging.go | 475 +++++ .../github.com/koding/logging/logging_unix.go | 8 + vendor/github.com/koding/logging/sink.go | 81 + vendor/github.com/koding/logging/syslog.go | 60 + .../github.com/sycamoreone/orc/tor/config.go | 80 + vendor/github.com/sycamoreone/orc/tor/exec.go | 74 + .../yawning/bulb/utils/pkcs1/rsa.go | 101 + vendor/github.com/yawning/bulb/utils/utils.go | 81 + vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go | 77 + vendor/golang.org/x/net/proxy/direct.go | 18 + 
vendor/golang.org/x/net/proxy/per_host.go | 140 ++ vendor/golang.org/x/net/proxy/proxy.go | 94 + vendor/golang.org/x/net/proxy/socks5.go | 210 ++ vendor/gopkg.in/fatih/pool.v2/channel.go | 131 ++ vendor/gopkg.in/fatih/pool.v2/conn.go | 43 + vendor/gopkg.in/fatih/pool.v2/pool.go | 28 + .../gorethink/gorethink.v2/encoding/cache.go | 283 +++ .../gorethink.v2/encoding/decoder.go | 152 ++ .../gorethink.v2/encoding/decoder_types.go | 555 ++++++ .../gorethink.v2/encoding/encoder.go | 89 + .../gorethink.v2/encoding/encoder_types.go | 410 ++++ .../gorethink.v2/encoding/encoding.go | 32 + .../gorethink/gorethink.v2/encoding/errors.go | 102 + .../gorethink/gorethink.v2/encoding/fold.go | 139 ++ .../gorethink/gorethink.v2/encoding/tags.go | 104 + .../gorethink/gorethink.v2/encoding/utils.go | 72 + .../gorethink/gorethink.v2/ql2/generate.go | 3 + .../gorethink/gorethink.v2/ql2/ql2.pb.go | 1681 +++++++++++++++++ .../gorethink/gorethink.v2/types/geometry.go | 225 +++ 137 files changed, 26629 insertions(+), 8 deletions(-) create mode 100644 vendor-log create mode 100644 vendor/github.com/GoRethink/gorethink/cluster.go create mode 100644 vendor/github.com/GoRethink/gorethink/cluster_integration_test.go create mode 100644 vendor/github.com/GoRethink/gorethink/cluster_test.go create mode 100644 vendor/github.com/GoRethink/gorethink/connection.go create mode 100644 vendor/github.com/GoRethink/gorethink/connection_handshake.go create mode 100644 vendor/github.com/GoRethink/gorethink/connection_helper.go create mode 100644 vendor/github.com/GoRethink/gorethink/cursor.go create mode 100644 vendor/github.com/GoRethink/gorethink/doc.go create mode 100644 vendor/github.com/GoRethink/gorethink/errors.go create mode 100644 vendor/github.com/GoRethink/gorethink/gorethink.go create mode 100644 vendor/github.com/GoRethink/gorethink/host.go create mode 100644 vendor/github.com/GoRethink/gorethink/mock.go create mode 100644 vendor/github.com/GoRethink/gorethink/node.go create mode 100644 
vendor/github.com/GoRethink/gorethink/pool.go create mode 100644 vendor/github.com/GoRethink/gorethink/pseudotypes.go create mode 100644 vendor/github.com/GoRethink/gorethink/query.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_admin.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_aggregation.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_control.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_db.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_geospatial.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_join.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_manipulation.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_math.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_select.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_string.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_table.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_time.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_transformation.go create mode 100644 vendor/github.com/GoRethink/gorethink/query_write.go create mode 100644 vendor/github.com/GoRethink/gorethink/session.go create mode 100644 vendor/github.com/GoRethink/gorethink/utils.go create mode 100644 vendor/github.com/Sirupsen/logrus/alt_exit.go create mode 100644 vendor/github.com/Sirupsen/logrus/doc.go create mode 100644 vendor/github.com/Sirupsen/logrus/entry.go create mode 100644 vendor/github.com/Sirupsen/logrus/exported.go create mode 100644 vendor/github.com/Sirupsen/logrus/formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/hooks.go create mode 100644 vendor/github.com/Sirupsen/logrus/json_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/logger.go create mode 100644 vendor/github.com/Sirupsen/logrus/logrus.go create mode 100644 
vendor/github.com/Sirupsen/logrus/terminal_appengine.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_bsd.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_linux.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_notwindows.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_solaris.go create mode 100644 vendor/github.com/Sirupsen/logrus/terminal_windows.go create mode 100644 vendor/github.com/Sirupsen/logrus/text_formatter.go create mode 100644 vendor/github.com/Sirupsen/logrus/writer.go create mode 100644 vendor/github.com/Xe/uuid/dce.go create mode 100644 vendor/github.com/Xe/uuid/doc.go create mode 100644 vendor/github.com/Xe/uuid/hash.go create mode 100644 vendor/github.com/Xe/uuid/node.go create mode 100644 vendor/github.com/Xe/uuid/time.go create mode 100644 vendor/github.com/Xe/uuid/util.go create mode 100644 vendor/github.com/Xe/uuid/uuid.go create mode 100644 vendor/github.com/Xe/uuid/version1.go create mode 100644 vendor/github.com/Xe/uuid/version4.go create mode 100644 vendor/github.com/Yawning/bulb/cmd_authenticate.go create mode 100644 vendor/github.com/Yawning/bulb/cmd_onion.go create mode 100644 vendor/github.com/Yawning/bulb/cmd_protocolinfo.go create mode 100644 vendor/github.com/Yawning/bulb/conn.go create mode 100644 vendor/github.com/Yawning/bulb/dialer.go create mode 100644 vendor/github.com/Yawning/bulb/listener.go create mode 100644 vendor/github.com/Yawning/bulb/response.go create mode 100644 vendor/github.com/Yawning/bulb/status.go create mode 100644 vendor/github.com/cenk/backoff/backoff.go create mode 100644 vendor/github.com/cenk/backoff/exponential.go create mode 100644 vendor/github.com/cenk/backoff/retry.go create mode 100644 vendor/github.com/cenk/backoff/ticker.go create mode 100644 vendor/github.com/cenkalti/backoff/backoff.go create mode 100644 vendor/github.com/cenkalti/backoff/exponential.go create mode 100644 vendor/github.com/cenkalti/backoff/retry.go create mode 
100644 vendor/github.com/cenkalti/backoff/ticker.go create mode 100644 vendor/github.com/facebookgo/flagenv/flagenv.go create mode 100644 vendor/github.com/golang/protobuf/proto/clone.go create mode 100644 vendor/github.com/golang/protobuf/proto/decode.go create mode 100644 vendor/github.com/golang/protobuf/proto/encode.go create mode 100644 vendor/github.com/golang/protobuf/proto/equal.go create mode 100644 vendor/github.com/golang/protobuf/proto/extensions.go create mode 100644 vendor/github.com/golang/protobuf/proto/lib.go create mode 100644 vendor/github.com/golang/protobuf/proto/message_set.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/golang/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/golang/protobuf/proto/properties.go create mode 100644 vendor/github.com/golang/protobuf/proto/text.go create mode 100644 vendor/github.com/golang/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go create mode 100644 vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go create mode 100644 vendor/github.com/hailocab/go-hostpool/host_entry.go create mode 100644 vendor/github.com/hailocab/go-hostpool/hostpool.go create mode 100644 vendor/github.com/hashicorp/yamux/addr.go create mode 100644 vendor/github.com/hashicorp/yamux/const.go create mode 100644 vendor/github.com/hashicorp/yamux/mux.go create mode 100644 vendor/github.com/hashicorp/yamux/session.go create mode 100644 vendor/github.com/hashicorp/yamux/stream.go create mode 100644 vendor/github.com/hashicorp/yamux/util.go create mode 100644 vendor/github.com/joho/godotenv/autoload/autoload.go create mode 100644 vendor/github.com/joho/godotenv/godotenv.go create mode 100644 vendor/github.com/koding/logging/context.go create mode 100644 vendor/github.com/koding/logging/custom.go create mode 100644 vendor/github.com/koding/logging/logging.go create mode 100644 
vendor/github.com/koding/logging/logging_unix.go create mode 100644 vendor/github.com/koding/logging/sink.go create mode 100644 vendor/github.com/koding/logging/syslog.go create mode 100644 vendor/github.com/sycamoreone/orc/tor/config.go create mode 100644 vendor/github.com/sycamoreone/orc/tor/exec.go create mode 100644 vendor/github.com/yawning/bulb/utils/pkcs1/rsa.go create mode 100644 vendor/github.com/yawning/bulb/utils/utils.go create mode 100644 vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go create mode 100644 vendor/golang.org/x/net/proxy/direct.go create mode 100644 vendor/golang.org/x/net/proxy/per_host.go create mode 100644 vendor/golang.org/x/net/proxy/proxy.go create mode 100644 vendor/golang.org/x/net/proxy/socks5.go create mode 100644 vendor/gopkg.in/fatih/pool.v2/channel.go create mode 100644 vendor/gopkg.in/fatih/pool.v2/conn.go create mode 100644 vendor/gopkg.in/fatih/pool.v2/pool.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/cache.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/decoder.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/decoder_types.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoder.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoder_types.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoding.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/errors.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/fold.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/tags.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/encoding/utils.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/ql2/generate.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/ql2/ql2.pb.go create mode 100644 vendor/gopkg.in/gorethink/gorethink.v2/types/geometry.go diff --git a/lib/tunnel/client.go b/lib/tunnel/client.go index acd9c36..f23ddc7 
100644 --- a/lib/tunnel/client.go +++ b/lib/tunnel/client.go @@ -11,10 +11,9 @@ import ( "sync/atomic" "time" - "github.com/koding/logging" "git.xeserv.us/xena/route/lib/tunnel/proto" - "github.com/hashicorp/yamux" + "github.com/koding/logging" ) //go:generate stringer -type ClientState diff --git a/lib/tunnel/helper_test.go b/lib/tunnel/helper_test.go index adab80a..3c47874 100644 --- a/lib/tunnel/helper_test.go +++ b/lib/tunnel/helper_test.go @@ -16,7 +16,6 @@ import ( "git.xeserv.us/xena/route/lib/tunnel" "git.xeserv.us/xena/route/lib/tunnel/tunneltest" - "github.com/gorilla/websocket" ) diff --git a/lib/tunnel/server.go b/lib/tunnel/server.go index b733320..dc3c9a4 100644 --- a/lib/tunnel/server.go +++ b/lib/tunnel/server.go @@ -17,10 +17,9 @@ import ( "sync" "time" - "github.com/koding/logging" "git.xeserv.us/xena/route/lib/tunnel/proto" - "github.com/hashicorp/yamux" + "github.com/koding/logging" ) var ( diff --git a/lib/tunnel/tcpproxy.go b/lib/tunnel/tcpproxy.go index 5343c24..fec0e2a 100644 --- a/lib/tunnel/tcpproxy.go +++ b/lib/tunnel/tcpproxy.go @@ -4,8 +4,8 @@ import ( "fmt" "net" - "github.com/koding/logging" "git.xeserv.us/xena/route/lib/tunnel/proto" + "github.com/koding/logging" ) var ( diff --git a/lib/tunnel/tunnel_test.go b/lib/tunnel/tunnel_test.go index c053065..5c27cbe 100644 --- a/lib/tunnel/tunnel_test.go +++ b/lib/tunnel/tunnel_test.go @@ -9,7 +9,6 @@ import ( "git.xeserv.us/xena/route/lib/tunnel" "git.xeserv.us/xena/route/lib/tunnel/tunneltest" - "github.com/cenkalti/backoff" ) diff --git a/lib/tunnel/util.go b/lib/tunnel/util.go index 154bea2..a82ad9a 100644 --- a/lib/tunnel/util.go +++ b/lib/tunnel/util.go @@ -8,7 +8,6 @@ import ( "time" "git.xeserv.us/xena/route/lib/tunnel/proto" - "github.com/cenkalti/backoff" ) diff --git a/vendor-log b/vendor-log new file mode 100644 index 0000000..7ce5d92 --- /dev/null +++ b/vendor-log @@ -0,0 +1,22 @@ +417badecf1ab14d0d6e38ad82397da2a59e2f6ca github.com/GoRethink/gorethink 
+9b48ece7fc373043054858f8c0d362665e866004 github.com/Sirupsen/logrus +62b230097e9c9534ca2074782b25d738c4b68964 (dirty) github.com/Xe/uuid +38b46760280b5500edd530aa39a8075bf22f9630 github.com/Yawning/bulb +b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3 github.com/cenk/backoff +b02f2bbce11d7ea6b97f282ef1771b0fe2f65ef3 github.com/cenkalti/backoff +fcd59fca7456889be7f2ad4515b7612fd6acef31 github.com/facebookgo/flagenv +8ee79997227bf9b34611aee7946ae64735e6fd93 github.com/golang/protobuf/proto +e80d13ce29ede4452c43dea11e79b9bc8a15b478 github.com/hailocab/go-hostpool +d1caa6c97c9fc1cc9e83bbe34d0603f9ff0ce8bd github.com/hashicorp/yamux +4ed13390c0acd2ff4e371e64d8b97c8954138243 github.com/joho/godotenv +4ed13390c0acd2ff4e371e64d8b97c8954138243 github.com/joho/godotenv/autoload +8b5a689ed69b1c7cd1e3595276fc2a352d7818e0 github.com/koding/logging +1627eaec269965440f742a25a627910195ad1c7a github.com/sycamoreone/orc/tor +38b46760280b5500edd530aa39a8075bf22f9630 github.com/yawning/bulb/utils +38b46760280b5500edd530aa39a8075bf22f9630 github.com/yawning/bulb/utils/pkcs1 +b8a2a83acfe6e6770b75de42d5ff4c67596675c0 golang.org/x/crypto/pbkdf2 +f2499483f923065a842d38eb4c7f1927e6fc6e6d golang.org/x/net/proxy +6e328e67893eb46323ad06f0e92cb9536babbabc gopkg.in/fatih/pool.v2 +016a1d3b4d15951ab2e39bd3596718ba94d298ba gopkg.in/gorethink/gorethink.v2/encoding +016a1d3b4d15951ab2e39bd3596718ba94d298ba gopkg.in/gorethink/gorethink.v2/ql2 +016a1d3b4d15951ab2e39bd3596718ba94d298ba gopkg.in/gorethink/gorethink.v2/types diff --git a/vendor/github.com/GoRethink/gorethink/cluster.go b/vendor/github.com/GoRethink/gorethink/cluster.go new file mode 100644 index 0000000..895b3ec --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/cluster.go @@ -0,0 +1,522 @@ +package gorethink + +import ( + "fmt" + "strings" + "sync" + "sync/atomic" + "time" + + "github.com/Sirupsen/logrus" + "github.com/cenk/backoff" + "github.com/hailocab/go-hostpool" +) + +// A Cluster represents a connection to a RethinkDB cluster, a 
cluster is created +// by the Session and should rarely be created manually. +// +// The cluster keeps track of all nodes in the cluster and if requested can listen +// for cluster changes and start tracking a new node if one appears. Currently +// nodes are removed from the pool if they become unhealthy (100 failed queries). +// This should hopefully soon be replaced by a backoff system. +type Cluster struct { + opts *ConnectOpts + + mu sync.RWMutex + seeds []Host // Initial host nodes specified by user. + hp hostpool.HostPool + nodes map[string]*Node // Active nodes in cluster. + closed bool + + nodeIndex int64 +} + +// NewCluster creates a new cluster by connecting to the given hosts. +func NewCluster(hosts []Host, opts *ConnectOpts) (*Cluster, error) { + c := &Cluster{ + hp: hostpool.NewEpsilonGreedy([]string{}, opts.HostDecayDuration, &hostpool.LinearEpsilonValueCalculator{}), + seeds: hosts, + opts: opts, + } + + // Attempt to connect to each host and discover any additional hosts if host + // discovery is enabled + if err := c.connectNodes(c.getSeeds()); err != nil { + return nil, err + } + + if !c.IsConnected() { + return nil, ErrNoConnectionsStarted + } + + if opts.DiscoverHosts { + go c.discover() + } + + return c, nil +} + +// Query executes a ReQL query using the cluster to connect to the database +func (c *Cluster) Query(q Query) (cursor *Cursor, err error) { + for i := 0; i < c.numRetries(); i++ { + var node *Node + var hpr hostpool.HostPoolResponse + + node, hpr, err = c.GetNextNode() + if err != nil { + return nil, err + } + + cursor, err = node.Query(q) + hpr.Mark(err) + + if !shouldRetryQuery(q, err) { + break + } + } + + return cursor, err +} + +// Exec executes a ReQL query using the cluster to connect to the database +func (c *Cluster) Exec(q Query) (err error) { + for i := 0; i < c.numRetries(); i++ { + var node *Node + var hpr hostpool.HostPoolResponse + + node, hpr, err = c.GetNextNode() + if err != nil { + return err + } + + err = 
node.Exec(q) + hpr.Mark(err) + + if !shouldRetryQuery(q, err) { + break + } + } + + return err +} + +// Server returns the server name and server UUID being used by a connection. +func (c *Cluster) Server() (response ServerResponse, err error) { + for i := 0; i < c.numRetries(); i++ { + var node *Node + var hpr hostpool.HostPoolResponse + + node, hpr, err = c.GetNextNode() + if err != nil { + return ServerResponse{}, err + } + + response, err = node.Server() + hpr.Mark(err) + + // This query should not fail so retry if any error is detected + if err == nil { + break + } + } + + return response, err +} + +// SetInitialPoolCap sets the initial capacity of the connection pool. +func (c *Cluster) SetInitialPoolCap(n int) { + for _, node := range c.GetNodes() { + node.SetInitialPoolCap(n) + } +} + +// SetMaxIdleConns sets the maximum number of connections in the idle +// connection pool. +func (c *Cluster) SetMaxIdleConns(n int) { + for _, node := range c.GetNodes() { + node.SetMaxIdleConns(n) + } +} + +// SetMaxOpenConns sets the maximum number of open connections to the database. +func (c *Cluster) SetMaxOpenConns(n int) { + for _, node := range c.GetNodes() { + node.SetMaxOpenConns(n) + } +} + +// Close closes the cluster +func (c *Cluster) Close(optArgs ...CloseOpts) error { + if c.closed { + return nil + } + + for _, node := range c.GetNodes() { + err := node.Close(optArgs...) + if err != nil { + return err + } + } + + c.hp.Close() + c.closed = true + + return nil +} + +// discover attempts to find new nodes in the cluster using the current nodes +func (c *Cluster) discover() { + // Keep retrying with exponential backoff. 
+ b := backoff.NewExponentialBackOff() + // Never finish retrying (max interval is still 60s) + b.MaxElapsedTime = 0 + + // Keep trying to discover new nodes + for { + backoff.RetryNotify(func() error { + // If no hosts try seeding nodes + if len(c.GetNodes()) == 0 { + c.connectNodes(c.getSeeds()) + } + + return c.listenForNodeChanges() + }, b, func(err error, wait time.Duration) { + Log.Debugf("Error discovering hosts %s, waiting: %s", err, wait) + }) + } +} + +// listenForNodeChanges listens for changes to node status using change feeds. +// This function will block until the query fails +func (c *Cluster) listenForNodeChanges() error { + // Start listening to changes from a random active node + node, hpr, err := c.GetNextNode() + if err != nil { + return err + } + + q, err := newQuery( + DB("rethinkdb").Table("server_status").Changes(), + map[string]interface{}{}, + c.opts, + ) + if err != nil { + return fmt.Errorf("Error building query: %s", err) + } + + cursor, err := node.Query(q) + if err != nil { + hpr.Mark(err) + return err + } + + // Keep reading node status updates from changefeed + var result struct { + NewVal nodeStatus `gorethink:"new_val"` + OldVal nodeStatus `gorethink:"old_val"` + } + for cursor.Next(&result) { + addr := fmt.Sprintf("%s:%d", result.NewVal.Network.Hostname, result.NewVal.Network.ReqlPort) + addr = strings.ToLower(addr) + + switch result.NewVal.Status { + case "connected": + // Connect to node using exponential backoff (give up after waiting 5s) + // to give the node time to start-up. 
+ b := backoff.NewExponentialBackOff() + b.MaxElapsedTime = time.Second * 5 + + backoff.Retry(func() error { + node, err := c.connectNodeWithStatus(result.NewVal) + if err == nil { + if !c.nodeExists(node) { + c.addNode(node) + + Log.WithFields(logrus.Fields{ + "id": node.ID, + "host": node.Host.String(), + }).Debug("Connected to node") + } + } + + return err + }, b) + } + } + + err = cursor.Err() + hpr.Mark(err) + return err +} + +func (c *Cluster) connectNodes(hosts []Host) error { + // Add existing nodes to map + nodeSet := map[string]*Node{} + for _, node := range c.GetNodes() { + nodeSet[node.ID] = node + } + + var attemptErr error + + // Attempt to connect to each seed host + for _, host := range hosts { + conn, err := NewConnection(host.String(), c.opts) + if err != nil { + attemptErr = err + Log.Warnf("Error creating connection: %s", err.Error()) + continue + } + defer conn.Close() + + if c.opts.DiscoverHosts { + q, err := newQuery( + DB("rethinkdb").Table("server_status"), + map[string]interface{}{}, + c.opts, + ) + if err != nil { + Log.Warnf("Error building query: %s", err) + continue + } + + _, cursor, err := conn.Query(q) + if err != nil { + attemptErr = err + Log.Warnf("Error fetching cluster status: %s", err) + continue + } + + var results []nodeStatus + err = cursor.All(&results) + if err != nil { + attemptErr = err + continue + } + + for _, result := range results { + node, err := c.connectNodeWithStatus(result) + if err == nil { + if _, ok := nodeSet[node.ID]; !ok { + Log.WithFields(logrus.Fields{ + "id": node.ID, + "host": node.Host.String(), + }).Debug("Connected to node") + nodeSet[node.ID] = node + } + } else { + attemptErr = err + Log.Warnf("Error connecting to node: %s", err) + } + } + } else { + svrRsp, err := conn.Server() + if err != nil { + attemptErr = err + Log.Warnf("Error fetching server ID: %s", err) + continue + } + + node, err := c.connectNode(svrRsp.ID, []Host{host}) + if err == nil { + if _, ok := nodeSet[node.ID]; !ok { + 
Log.WithFields(logrus.Fields{ + "id": node.ID, + "host": node.Host.String(), + }).Debug("Connected to node") + + nodeSet[node.ID] = node + } + } else { + attemptErr = err + Log.Warnf("Error connecting to node: %s", err) + } + } + } + + // If no nodes were contactable then return the last error, this does not + // include driver errors such as if there was an issue building the + // query + if len(nodeSet) == 0 { + return attemptErr + } + + nodes := []*Node{} + for _, node := range nodeSet { + nodes = append(nodes, node) + } + c.setNodes(nodes) + + return nil +} + +func (c *Cluster) connectNodeWithStatus(s nodeStatus) (*Node, error) { + aliases := make([]Host, len(s.Network.CanonicalAddresses)) + for i, aliasAddress := range s.Network.CanonicalAddresses { + aliases[i] = NewHost(aliasAddress.Host, int(s.Network.ReqlPort)) + } + + return c.connectNode(s.ID, aliases) +} + +func (c *Cluster) connectNode(id string, aliases []Host) (*Node, error) { + var pool *Pool + var err error + + for len(aliases) > 0 { + pool, err = NewPool(aliases[0], c.opts) + if err != nil { + aliases = aliases[1:] + continue + } + + err = pool.Ping() + if err != nil { + aliases = aliases[1:] + continue + } + + // Ping successful so break out of loop + break + } + + if err != nil { + return nil, err + } + if len(aliases) == 0 { + return nil, ErrInvalidNode + } + + return newNode(id, aliases, c, pool), nil +} + +// IsConnected returns true if cluster has nodes and is not already closed. +func (c *Cluster) IsConnected() bool { + c.mu.RLock() + closed := c.closed + c.mu.RUnlock() + + return (len(c.GetNodes()) > 0) && !closed +} + +// AddSeeds adds new seed hosts to the cluster. +func (c *Cluster) AddSeeds(hosts []Host) { + c.mu.Lock() + c.seeds = append(c.seeds, hosts...) 
+ c.mu.Unlock() +} + +func (c *Cluster) getSeeds() []Host { + c.mu.RLock() + seeds := c.seeds + c.mu.RUnlock() + + return seeds +} + +// GetNextNode returns a random node on the cluster +func (c *Cluster) GetNextNode() (*Node, hostpool.HostPoolResponse, error) { + if !c.IsConnected() { + return nil, nil, ErrNoConnections + } + c.mu.RLock() + defer c.mu.RUnlock() + + nodes := c.nodes + hpr := c.hp.Get() + if n, ok := nodes[hpr.Host()]; ok { + if !n.Closed() { + return n, hpr, nil + } + } + + return nil, nil, ErrNoConnections +} + +// GetNodes returns a list of all nodes in the cluster +func (c *Cluster) GetNodes() []*Node { + c.mu.RLock() + nodes := make([]*Node, 0, len(c.nodes)) + for _, n := range c.nodes { + nodes = append(nodes, n) + } + c.mu.RUnlock() + + return nodes +} + +func (c *Cluster) nodeExists(search *Node) bool { + for _, node := range c.GetNodes() { + if node.ID == search.ID { + return true + } + } + return false +} + +func (c *Cluster) addNode(node *Node) { + c.mu.RLock() + nodes := append(c.GetNodes(), node) + c.mu.RUnlock() + + c.setNodes(nodes) +} + +func (c *Cluster) addNodes(nodesToAdd []*Node) { + c.mu.RLock() + nodes := append(c.GetNodes(), nodesToAdd...) + c.mu.RUnlock() + + c.setNodes(nodes) +} + +func (c *Cluster) setNodes(nodes []*Node) { + nodesMap := make(map[string]*Node, len(nodes)) + hosts := make([]string, len(nodes)) + for i, node := range nodes { + host := node.Host.String() + + nodesMap[host] = node + hosts[i] = host + } + + c.mu.Lock() + c.nodes = nodesMap + c.hp.SetHosts(hosts) + c.mu.Unlock() +} + +func (c *Cluster) removeNode(nodeID string) { + nodes := c.GetNodes() + nodeArray := make([]*Node, len(nodes)-1) + count := 0 + + // Add nodes that are not in remove list. + for _, n := range nodes { + if n.ID != nodeID { + nodeArray[count] = n + count++ + } + } + + // Do sanity check to make sure assumptions are correct. + if count < len(nodeArray) { + // Resize array. 
+ nodeArray2 := make([]*Node, count) + copy(nodeArray2, nodeArray) + nodeArray = nodeArray2 + } + + c.setNodes(nodeArray) +} + +func (c *Cluster) nextNodeIndex() int64 { + return atomic.AddInt64(&c.nodeIndex, 1) +} + +func (c *Cluster) numRetries() int { + if n := c.opts.NumRetries; n > 0 { + return n + } + + return 3 +} diff --git a/vendor/github.com/GoRethink/gorethink/cluster_integration_test.go b/vendor/github.com/GoRethink/gorethink/cluster_integration_test.go new file mode 100644 index 0000000..929a235 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/cluster_integration_test.go @@ -0,0 +1,99 @@ +// +build cluster +// +build integration + +package gorethink + +import ( + "time" + + test "gopkg.in/check.v1" +) + +func (s *RethinkSuite) TestClusterDetectNewNode(c *test.C) { + session, err := Connect(ConnectOpts{ + Addresses: []string{url, url2}, + DiscoverHosts: true, + NodeRefreshInterval: time.Second, + }) + c.Assert(err, test.IsNil) + + t := time.NewTimer(time.Second * 30) + for { + select { + // Fail if deadline has passed + case <-t.C: + c.Fatal("No node was added to the cluster") + default: + // Pass if another node was added + if len(session.cluster.GetNodes()) >= 3 { + return + } + } + } +} + +func (s *RethinkSuite) TestClusterRecoverAfterNoNodes(c *test.C) { + session, err := Connect(ConnectOpts{ + Addresses: []string{url, url2}, + DiscoverHosts: true, + NodeRefreshInterval: time.Second, + }) + c.Assert(err, test.IsNil) + + t := time.NewTimer(time.Second * 30) + hasHadZeroNodes := false + for { + select { + // Fail if deadline has passed + case <-t.C: + c.Fatal("No node was added to the cluster") + default: + // Check if there are no nodes + if len(session.cluster.GetNodes()) == 0 { + hasHadZeroNodes = true + } + + // Pass if another node was added + if len(session.cluster.GetNodes()) >= 1 && hasHadZeroNodes { + return + } + } + } +} + +func (s *RethinkSuite) TestClusterNodeHealth(c *test.C) { + session, err := Connect(ConnectOpts{ + Addresses: 
[]string{url1, url2, url3}, + DiscoverHosts: true, + NodeRefreshInterval: time.Second, + InitialCap: 50, + MaxOpen: 200, + }) + c.Assert(err, test.IsNil) + + attempts := 0 + failed := 0 + seconds := 0 + + t := time.NewTimer(time.Second * 30) + tick := time.NewTicker(time.Second) + for { + select { + // Fail if deadline has passed + case <-tick.C: + seconds++ + c.Logf("%ds elapsed", seconds) + case <-t.C: + // Execute queries for 10s and check that at most 5% of the queries fail + c.Logf("%d of the %d(%d%%) queries failed", failed, attempts, (failed / attempts)) + c.Assert(failed <= 100, test.Equals, true) + return + default: + attempts++ + if err := Expr(1).Exec(session); err != nil { + c.Logf("Query failed, %s", err) + failed++ + } + } + } +} diff --git a/vendor/github.com/GoRethink/gorethink/cluster_test.go b/vendor/github.com/GoRethink/gorethink/cluster_test.go new file mode 100644 index 0000000..d7f334f --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/cluster_test.go @@ -0,0 +1,63 @@ +// +build cluster + +package gorethink + +import ( + "fmt" + "time" + + test "gopkg.in/check.v1" +) + +func (s *RethinkSuite) TestClusterConnect(c *test.C) { + session, err := Connect(ConnectOpts{ + Addresses: []string{url1, url2, url3}, + }) + c.Assert(err, test.IsNil) + + row, err := Expr("Hello World").Run(session) + c.Assert(err, test.IsNil) + + var response string + err = row.One(&response) + c.Assert(err, test.IsNil) + c.Assert(response, test.Equals, "Hello World") +} + +func (s *RethinkSuite) TestClusterMultipleQueries(c *test.C) { + session, err := Connect(ConnectOpts{ + Addresses: []string{url1, url2, url3}, + }) + c.Assert(err, test.IsNil) + + for i := 0; i < 1000; i++ { + row, err := Expr(fmt.Sprintf("Hello World", i)).Run(session) + c.Assert(err, test.IsNil) + + var response string + err = row.One(&response) + c.Assert(err, test.IsNil) + c.Assert(response, test.Equals, fmt.Sprintf("Hello World", i)) + } +} + +func (s *RethinkSuite) TestClusterConnectError(c 
*test.C) { + var err error + _, err = Connect(ConnectOpts{ + Addresses: []string{"nonexistanturl"}, + Timeout: time.Second, + }) + c.Assert(err, test.NotNil) +} + +func (s *RethinkSuite) TestClusterConnectDatabase(c *test.C) { + session, err := Connect(ConnectOpts{ + Addresses: []string{url1, url2, url3}, + Database: "test2", + }) + c.Assert(err, test.IsNil) + + _, err = Table("test2").Run(session) + c.Assert(err, test.NotNil) + c.Assert(err.Error(), test.Equals, "gorethink: Database `test2` does not exist. in:\nr.Table(\"test2\")") +} diff --git a/vendor/github.com/GoRethink/gorethink/connection.go b/vendor/github.com/GoRethink/gorethink/connection.go new file mode 100644 index 0000000..9147ae1 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/connection.go @@ -0,0 +1,381 @@ +package gorethink + +import ( + "crypto/tls" + "encoding/binary" + "encoding/json" + "net" + "sync" + "sync/atomic" + "time" + + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +const ( + respHeaderLen = 12 + defaultKeepAlivePeriod = time.Second * 30 +) + +// Response represents the raw response from a query, most of the time you +// should instead use a Cursor when reading from the database. +type Response struct { + Token int64 + Type p.Response_ResponseType `json:"t"` + ErrorType p.Response_ErrorType `json:"e"` + Notes []p.Response_ResponseNote `json:"n"` + Responses []json.RawMessage `json:"r"` + Backtrace []interface{} `json:"b"` + Profile interface{} `json:"p"` +} + +// Connection is a connection to a rethinkdb database. 
Connection is not thread +// safe and should only be accessed be a single goroutine +type Connection struct { + net.Conn + + address string + opts *ConnectOpts + + _ [4]byte + mu sync.Mutex + token int64 + cursors map[int64]*Cursor + bad bool + closed bool +} + +// NewConnection creates a new connection to the database server +func NewConnection(address string, opts *ConnectOpts) (*Connection, error) { + var err error + c := &Connection{ + address: address, + opts: opts, + cursors: make(map[int64]*Cursor), + } + + keepAlivePeriod := defaultKeepAlivePeriod + if opts.KeepAlivePeriod > 0 { + keepAlivePeriod = opts.KeepAlivePeriod + } + + // Connect to Server + nd := net.Dialer{Timeout: c.opts.Timeout, KeepAlive: keepAlivePeriod} + if c.opts.TLSConfig == nil { + c.Conn, err = nd.Dial("tcp", address) + } else { + c.Conn, err = tls.DialWithDialer(&nd, "tcp", address, c.opts.TLSConfig) + } + if err != nil { + return nil, RQLConnectionError{rqlError(err.Error())} + } + + // Send handshake + handshake, err := c.handshake(opts.HandshakeVersion) + if err != nil { + return nil, err + } + + if err = handshake.Send(); err != nil { + return nil, err + } + + return c, nil +} + +// Close closes the underlying net.Conn +func (c *Connection) Close() error { + c.mu.Lock() + defer c.mu.Unlock() + + var err error + + if !c.closed { + err = c.Conn.Close() + c.closed = true + c.cursors = make(map[int64]*Cursor) + } + + return err +} + +// Query sends a Query to the database, returning both the raw Response and a +// Cursor which should be used to view the query's response. +// +// This function is used internally by Run which should be used for most queries. 
+func (c *Connection) Query(q Query) (*Response, *Cursor, error) { + if c == nil { + return nil, nil, ErrConnectionClosed + } + c.mu.Lock() + if c.Conn == nil { + c.bad = true + c.mu.Unlock() + return nil, nil, ErrConnectionClosed + } + + // Add token if query is a START/NOREPLY_WAIT + if q.Type == p.Query_START || q.Type == p.Query_NOREPLY_WAIT || q.Type == p.Query_SERVER_INFO { + q.Token = c.nextToken() + } + if q.Type == p.Query_START || q.Type == p.Query_NOREPLY_WAIT { + if c.opts.Database != "" { + var err error + q.Opts["db"], err = DB(c.opts.Database).Build() + if err != nil { + c.mu.Unlock() + return nil, nil, RQLDriverError{rqlError(err.Error())} + } + } + } + c.mu.Unlock() + + err := c.sendQuery(q) + if err != nil { + return nil, nil, err + } + + if noreply, ok := q.Opts["noreply"]; ok && noreply.(bool) { + return nil, nil, nil + } + + for { + response, err := c.readResponse() + if err != nil { + return nil, nil, err + } + + if response.Token == q.Token { + // If this was the requested response process and return + return c.processResponse(q, response) + } else if _, ok := c.cursors[response.Token]; ok { + // If the token is in the cursor cache then process the response + c.processResponse(q, response) + } else { + putResponse(response) + } + } +} + +type ServerResponse struct { + ID string `gorethink:"id"` + Name string `gorethink:"name"` +} + +// Server returns the server name and server UUID being used by a connection. +func (c *Connection) Server() (ServerResponse, error) { + var response ServerResponse + + _, cur, err := c.Query(Query{ + Type: p.Query_SERVER_INFO, + }) + if err != nil { + return response, err + } + + if err = cur.One(&response); err != nil { + return response, err + } + + if err = cur.Close(); err != nil { + return response, err + } + + return response, nil +} + +// sendQuery marshals the Query and sends the JSON to the server. 
+func (c *Connection) sendQuery(q Query) error { + // Build query + b, err := json.Marshal(q.Build()) + if err != nil { + return RQLDriverError{rqlError("Error building query")} + } + + // Set timeout + if c.opts.WriteTimeout == 0 { + c.Conn.SetWriteDeadline(time.Time{}) + } else { + c.Conn.SetWriteDeadline(time.Now().Add(c.opts.WriteTimeout)) + } + + // Send the JSON encoding of the query itself. + if err = c.writeQuery(q.Token, b); err != nil { + c.bad = true + return RQLConnectionError{rqlError(err.Error())} + } + + return nil +} + +// getToken generates the next query token, used to number requests and match +// responses with requests. +func (c *Connection) nextToken() int64 { + // requires c.token to be 64-bit aligned on ARM + return atomic.AddInt64(&c.token, 1) +} + +// readResponse attempts to read a Response from the server, if no response +// could be read then an error is returned. +func (c *Connection) readResponse() (*Response, error) { + // Set timeout + if c.opts.ReadTimeout == 0 { + c.Conn.SetReadDeadline(time.Time{}) + } else { + c.Conn.SetReadDeadline(time.Now().Add(c.opts.ReadTimeout)) + } + + // Read response header (token+length) + headerBuf := [respHeaderLen]byte{} + if _, err := c.read(headerBuf[:], respHeaderLen); err != nil { + c.bad = true + return nil, RQLConnectionError{rqlError(err.Error())} + } + + responseToken := int64(binary.LittleEndian.Uint64(headerBuf[:8])) + messageLength := binary.LittleEndian.Uint32(headerBuf[8:]) + + // Read the JSON encoding of the Response itself. 
+ b := make([]byte, int(messageLength)) + + if _, err := c.read(b, int(messageLength)); err != nil { + c.bad = true + return nil, RQLConnectionError{rqlError(err.Error())} + } + + // Decode the response + var response = newCachedResponse() + if err := json.Unmarshal(b, response); err != nil { + c.bad = true + return nil, RQLDriverError{rqlError(err.Error())} + } + response.Token = responseToken + + return response, nil +} + +func (c *Connection) processResponse(q Query, response *Response) (*Response, *Cursor, error) { + switch response.Type { + case p.Response_CLIENT_ERROR: + return c.processErrorResponse(q, response, RQLClientError{rqlServerError{response, q.Term}}) + case p.Response_COMPILE_ERROR: + return c.processErrorResponse(q, response, RQLCompileError{rqlServerError{response, q.Term}}) + case p.Response_RUNTIME_ERROR: + return c.processErrorResponse(q, response, createRuntimeError(response.ErrorType, response, q.Term)) + case p.Response_SUCCESS_ATOM, p.Response_SERVER_INFO: + return c.processAtomResponse(q, response) + case p.Response_SUCCESS_PARTIAL: + return c.processPartialResponse(q, response) + case p.Response_SUCCESS_SEQUENCE: + return c.processSequenceResponse(q, response) + case p.Response_WAIT_COMPLETE: + return c.processWaitResponse(q, response) + default: + putResponse(response) + return nil, nil, RQLDriverError{rqlError("Unexpected response type")} + } +} + +func (c *Connection) processErrorResponse(q Query, response *Response, err error) (*Response, *Cursor, error) { + c.mu.Lock() + cursor := c.cursors[response.Token] + + delete(c.cursors, response.Token) + c.mu.Unlock() + + return response, cursor, err +} + +func (c *Connection) processAtomResponse(q Query, response *Response) (*Response, *Cursor, error) { + // Create cursor + cursor := newCursor(c, "Cursor", response.Token, q.Term, q.Opts) + cursor.profile = response.Profile + + cursor.extend(response) + + return response, cursor, nil +} + +func (c *Connection) processPartialResponse(q 
Query, response *Response) (*Response, *Cursor, error) { + cursorType := "Cursor" + if len(response.Notes) > 0 { + switch response.Notes[0] { + case p.Response_SEQUENCE_FEED: + cursorType = "Feed" + case p.Response_ATOM_FEED: + cursorType = "AtomFeed" + case p.Response_ORDER_BY_LIMIT_FEED: + cursorType = "OrderByLimitFeed" + case p.Response_UNIONED_FEED: + cursorType = "UnionedFeed" + case p.Response_INCLUDES_STATES: + cursorType = "IncludesFeed" + } + } + + c.mu.Lock() + cursor, ok := c.cursors[response.Token] + if !ok { + // Create a new cursor if needed + cursor = newCursor(c, cursorType, response.Token, q.Term, q.Opts) + cursor.profile = response.Profile + + c.cursors[response.Token] = cursor + } + c.mu.Unlock() + + cursor.extend(response) + + return response, cursor, nil +} + +func (c *Connection) processSequenceResponse(q Query, response *Response) (*Response, *Cursor, error) { + c.mu.Lock() + cursor, ok := c.cursors[response.Token] + if !ok { + // Create a new cursor if needed + cursor = newCursor(c, "Cursor", response.Token, q.Term, q.Opts) + cursor.profile = response.Profile + } + + delete(c.cursors, response.Token) + c.mu.Unlock() + + cursor.extend(response) + + return response, cursor, nil +} + +func (c *Connection) processWaitResponse(q Query, response *Response) (*Response, *Cursor, error) { + c.mu.Lock() + delete(c.cursors, response.Token) + c.mu.Unlock() + + return response, nil, nil +} + +func (c *Connection) isBad() bool { + c.mu.Lock() + defer c.mu.Unlock() + + return c.bad +} + +var responseCache = make(chan *Response, 16) + +func newCachedResponse() *Response { + select { + case r := <-responseCache: + return r + default: + return new(Response) + } +} + +func putResponse(r *Response) { + *r = Response{} // zero it + select { + case responseCache <- r: + default: + } +} diff --git a/vendor/github.com/GoRethink/gorethink/connection_handshake.go b/vendor/github.com/GoRethink/gorethink/connection_handshake.go new file mode 100644 index 
0000000..96e417f --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/connection_handshake.go @@ -0,0 +1,450 @@ +package gorethink + +import ( + "bufio" + "crypto/hmac" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + "strings" + + "golang.org/x/crypto/pbkdf2" + + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +type HandshakeVersion int + +const ( + HandshakeV1_0 HandshakeVersion = iota + HandshakeV0_4 +) + +type connectionHandshake interface { + Send() error +} + +func (c *Connection) handshake(version HandshakeVersion) (connectionHandshake, error) { + switch version { + case HandshakeV0_4: + return &connectionHandshakeV0_4{conn: c}, nil + case HandshakeV1_0: + return &connectionHandshakeV1_0{conn: c}, nil + default: + return nil, fmt.Errorf("Unrecognised handshake version") + } +} + +type connectionHandshakeV0_4 struct { + conn *Connection +} + +func (c *connectionHandshakeV0_4) Send() error { + // Send handshake request + if err := c.writeHandshakeReq(); err != nil { + c.conn.Close() + return RQLConnectionError{rqlError(err.Error())} + } + // Read handshake response + if err := c.readHandshakeSuccess(); err != nil { + c.conn.Close() + return RQLConnectionError{rqlError(err.Error())} + } + + return nil +} + +func (c *connectionHandshakeV0_4) writeHandshakeReq() error { + pos := 0 + dataLen := 4 + 4 + len(c.conn.opts.AuthKey) + 4 + data := make([]byte, dataLen) + + // Send the protocol version to the server as a 4-byte little-endian-encoded integer + binary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_V0_4)) + pos += 4 + + // Send the length of the auth key to the server as a 4-byte little-endian-encoded integer + binary.LittleEndian.PutUint32(data[pos:], uint32(len(c.conn.opts.AuthKey))) + pos += 4 + + // Send the auth key as an ASCII string + if len(c.conn.opts.AuthKey) > 0 { + pos += copy(data[pos:], c.conn.opts.AuthKey) + } + + // Send the protocol type as a 4-byte 
little-endian-encoded integer + binary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_JSON)) + pos += 4 + + return c.conn.writeData(data) +} + +func (c *connectionHandshakeV0_4) readHandshakeSuccess() error { + reader := bufio.NewReader(c.conn.Conn) + line, err := reader.ReadBytes('\x00') + if err != nil { + if err == io.EOF { + return fmt.Errorf("Unexpected EOF: %s", string(line)) + } + return err + } + // convert to string and remove trailing NUL byte + response := string(line[:len(line)-1]) + if response != "SUCCESS" { + response = strings.TrimSpace(response) + // we failed authorization or something else terrible happened + return RQLDriverError{rqlError(fmt.Sprintf("Server dropped connection with message: \"%s\"", response))} + } + + return nil +} + +const ( + handshakeV1_0_protocolVersionNumber = 0 + handshakeV1_0_authenticationMethod = "SCRAM-SHA-256" +) + +type connectionHandshakeV1_0 struct { + conn *Connection + reader *bufio.Reader + + authMsg string +} + +func (c *connectionHandshakeV1_0) Send() error { + c.reader = bufio.NewReader(c.conn.Conn) + + // Generate client nonce + clientNonce, err := c.generateNonce() + if err != nil { + c.conn.Close() + return RQLDriverError{rqlError(fmt.Sprintf("Failed to generate client nonce: %s", err))} + } + // Send client first message + if err := c.writeFirstMessage(clientNonce); err != nil { + c.conn.Close() + return err + } + // Read status + if err := c.checkServerVersions(); err != nil { + c.conn.Close() + return err + } + + // Read server first message + i, salt, serverNonce, err := c.readFirstMessage() + if err != nil { + c.conn.Close() + return err + } + + // Check server nonce + if !strings.HasPrefix(serverNonce, clientNonce) { + return RQLAuthError{RQLDriverError{rqlError("Invalid nonce from server")}} + } + + // Generate proof + saltedPass := c.saltPassword(i, salt) + clientProof := c.calculateProof(saltedPass, clientNonce, serverNonce) + serverSignature := c.serverSignature(saltedPass) + + // Send 
client final message + if err := c.writeFinalMessage(serverNonce, clientProof); err != nil { + c.conn.Close() + return err + } + // Read server final message + if err := c.readFinalMessage(serverSignature); err != nil { + c.conn.Close() + return err + } + + return nil +} + +func (c *connectionHandshakeV1_0) writeFirstMessage(clientNonce string) error { + // Default username to admin if not set + username := "admin" + if c.conn.opts.Username != "" { + username = c.conn.opts.Username + } + + c.authMsg = fmt.Sprintf("n=%s,r=%s", username, clientNonce) + msg := fmt.Sprintf( + `{"protocol_version": %d,"authentication": "n,,%s","authentication_method": "%s"}`, + handshakeV1_0_protocolVersionNumber, c.authMsg, handshakeV1_0_authenticationMethod, + ) + + pos := 0 + dataLen := 4 + len(msg) + 1 + data := make([]byte, dataLen) + + // Send the protocol version to the server as a 4-byte little-endian-encoded integer + binary.LittleEndian.PutUint32(data[pos:], uint32(p.VersionDummy_V1_0)) + pos += 4 + + // Send the auth message as an ASCII string + pos += copy(data[pos:], msg) + + // Add null terminating byte + data[pos] = '\x00' + + return c.writeData(data) +} + +func (c *connectionHandshakeV1_0) checkServerVersions() error { + b, err := c.readResponse() + if err != nil { + return err + } + + // Read status + type versionsResponse struct { + Success bool `json:"success"` + MinProtocolVersion int `json:"min_protocol_version"` + MaxProtocolVersion int `json:"max_protocol_version"` + ServerVersion string `json:"server_version"` + ErrorCode int `json:"error_code"` + Error string `json:"error"` + } + var rsp *versionsResponse + statusStr := string(b) + + if err := json.Unmarshal(b, &rsp); err != nil { + if strings.HasPrefix(statusStr, "ERROR: ") { + statusStr = strings.TrimPrefix(statusStr, "ERROR: ") + return RQLConnectionError{rqlError(statusStr)} + } + + return RQLDriverError{rqlError(fmt.Sprintf("Error reading versions: %s", err))} + } + + if !rsp.Success { + return 
c.handshakeError(rsp.ErrorCode, rsp.Error) + } + if rsp.MinProtocolVersion > handshakeV1_0_protocolVersionNumber || + rsp.MaxProtocolVersion < handshakeV1_0_protocolVersionNumber { + return RQLDriverError{rqlError( + fmt.Sprintf( + "Unsupported protocol version %d, expected between %d and %d.", + handshakeV1_0_protocolVersionNumber, + rsp.MinProtocolVersion, + rsp.MaxProtocolVersion, + ), + )} + } + + return nil +} + +func (c *connectionHandshakeV1_0) readFirstMessage() (i int64, salt []byte, serverNonce string, err error) { + b, err2 := c.readResponse() + if err2 != nil { + err = err2 + return + } + + // Read server message + type firstMessageResponse struct { + Success bool `json:"success"` + Authentication string `json:"authentication"` + ErrorCode int `json:"error_code"` + Error string `json:"error"` + } + var rsp *firstMessageResponse + + if err2 := json.Unmarshal(b, &rsp); err2 != nil { + err = RQLDriverError{rqlError(fmt.Sprintf("Error parsing auth response: %s", err2))} + return + } + if !rsp.Success { + err = c.handshakeError(rsp.ErrorCode, rsp.Error) + return + } + + c.authMsg += "," + c.authMsg += rsp.Authentication + + // Parse authentication field + auth := map[string]string{} + parts := strings.Split(rsp.Authentication, ",") + for _, part := range parts { + i := strings.Index(part, "=") + if i != -1 { + auth[part[:i]] = part[i+1:] + } + } + + // Extract return values + if v, ok := auth["i"]; ok { + i, err = strconv.ParseInt(v, 10, 64) + if err != nil { + return + } + } + if v, ok := auth["s"]; ok { + salt, err = base64.StdEncoding.DecodeString(v) + if err != nil { + return + } + } + if v, ok := auth["r"]; ok { + serverNonce = v + } + + return +} + +func (c *connectionHandshakeV1_0) writeFinalMessage(serverNonce, clientProof string) error { + authMsg := "c=biws,r=" + authMsg += serverNonce + authMsg += ",p=" + authMsg += clientProof + + msg := fmt.Sprintf(`{"authentication": "%s"}`, authMsg) + + pos := 0 + dataLen := len(msg) + 1 + data := make([]byte, 
dataLen) + + // Send the auth message as an ASCII string + pos += copy(data[pos:], msg) + + // Add null terminating byte + data[pos] = '\x00' + + return c.writeData(data) +} + +func (c *connectionHandshakeV1_0) readFinalMessage(serverSignature string) error { + b, err := c.readResponse() + if err != nil { + return err + } + + // Read server message + type finalMessageResponse struct { + Success bool `json:"success"` + Authentication string `json:"authentication"` + ErrorCode int `json:"error_code"` + Error string `json:"error"` + } + var rsp *finalMessageResponse + + if err := json.Unmarshal(b, &rsp); err != nil { + return RQLDriverError{rqlError(fmt.Sprintf("Error parsing auth response: %s", err))} + } + if !rsp.Success { + return c.handshakeError(rsp.ErrorCode, rsp.Error) + } + + // Parse authentication field + auth := map[string]string{} + parts := strings.Split(rsp.Authentication, ",") + for _, part := range parts { + i := strings.Index(part, "=") + if i != -1 { + auth[part[:i]] = part[i+1:] + } + } + + // Validate server response + if serverSignature != auth["v"] { + return RQLAuthError{RQLDriverError{rqlError("Invalid server signature")}} + } + + return nil +} + +func (c *connectionHandshakeV1_0) writeData(data []byte) error { + + if err := c.conn.writeData(data); err != nil { + return RQLConnectionError{rqlError(err.Error())} + } + + return nil +} + +func (c *connectionHandshakeV1_0) readResponse() ([]byte, error) { + line, err := c.reader.ReadBytes('\x00') + if err != nil { + if err == io.EOF { + return nil, RQLConnectionError{rqlError(fmt.Sprintf("Unexpected EOF: %s", string(line)))} + } + return nil, RQLConnectionError{rqlError(err.Error())} + } + + // Strip null byte and return + return line[:len(line)-1], nil +} + +func (c *connectionHandshakeV1_0) generateNonce() (string, error) { + const nonceSize = 24 + + b := make([]byte, nonceSize) + _, err := rand.Read(b) + if err != nil { + return "", err + } + + return base64.StdEncoding.EncodeToString(b), nil 
+} + +func (c *connectionHandshakeV1_0) saltPassword(iter int64, salt []byte) []byte { + pass := []byte(c.conn.opts.Password) + + return pbkdf2.Key(pass, salt, int(iter), sha256.Size, sha256.New) +} + +func (c *connectionHandshakeV1_0) calculateProof(saltedPass []byte, clientNonce, serverNonce string) string { + // Generate proof + c.authMsg += ",c=biws,r=" + serverNonce + + mac := hmac.New(c.hashFunc(), saltedPass) + mac.Write([]byte("Client Key")) + clientKey := mac.Sum(nil) + + hash := c.hashFunc()() + hash.Write(clientKey) + storedKey := hash.Sum(nil) + + mac = hmac.New(c.hashFunc(), storedKey) + mac.Write([]byte(c.authMsg)) + clientSignature := mac.Sum(nil) + clientProof := make([]byte, len(clientKey)) + for i, _ := range clientKey { + clientProof[i] = clientKey[i] ^ clientSignature[i] + } + + return base64.StdEncoding.EncodeToString(clientProof) +} + +func (c *connectionHandshakeV1_0) serverSignature(saltedPass []byte) string { + mac := hmac.New(c.hashFunc(), saltedPass) + mac.Write([]byte("Server Key")) + serverKey := mac.Sum(nil) + + mac = hmac.New(c.hashFunc(), serverKey) + mac.Write([]byte(c.authMsg)) + serverSignature := mac.Sum(nil) + + return base64.StdEncoding.EncodeToString(serverSignature) +} + +func (c *connectionHandshakeV1_0) handshakeError(code int, message string) error { + if code >= 10 || code <= 20 { + return RQLAuthError{RQLDriverError{rqlError(message)}} + } + + return RQLDriverError{rqlError(message)} +} + +func (c *connectionHandshakeV1_0) hashFunc() func() hash.Hash { + return sha256.New +} diff --git a/vendor/github.com/GoRethink/gorethink/connection_helper.go b/vendor/github.com/GoRethink/gorethink/connection_helper.go new file mode 100644 index 0000000..6846070 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/connection_helper.go @@ -0,0 +1,41 @@ +package gorethink + +import "encoding/binary" + +// Write 'data' to conn +func (c *Connection) writeData(data []byte) error { + _, err := c.Conn.Write(data[:]) + + return err +} + 
+func (c *Connection) read(buf []byte, length int) (total int, err error) { + var n int + for total < length { + if n, err = c.Conn.Read(buf[total:length]); err != nil { + break + } + total += n + } + + return total, err +} + +func (c *Connection) writeQuery(token int64, q []byte) error { + pos := 0 + dataLen := 8 + 4 + len(q) + data := make([]byte, dataLen) + + // Send the protocol version to the server as a 4-byte little-endian-encoded integer + binary.LittleEndian.PutUint64(data[pos:], uint64(token)) + pos += 8 + + // Send the length of the auth key to the server as a 4-byte little-endian-encoded integer + binary.LittleEndian.PutUint32(data[pos:], uint32(len(q))) + pos += 4 + + // Send the auth key as an ASCII string + pos += copy(data[pos:], q) + + return c.writeData(data) +} diff --git a/vendor/github.com/GoRethink/gorethink/cursor.go b/vendor/github.com/GoRethink/gorethink/cursor.go new file mode 100644 index 0000000..2912a43 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/cursor.go @@ -0,0 +1,710 @@ +package gorethink + +import ( + "bytes" + "encoding/json" + "errors" + "reflect" + "sync" + + "gopkg.in/gorethink/gorethink.v2/encoding" + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +var ( + errNilCursor = errors.New("cursor is nil") + errCursorClosed = errors.New("connection closed, cannot read cursor") +) + +func newCursor(conn *Connection, cursorType string, token int64, term *Term, opts map[string]interface{}) *Cursor { + if cursorType == "" { + cursorType = "Cursor" + } + + connOpts := &ConnectOpts{} + if conn != nil { + connOpts = conn.opts + } + + cursor := &Cursor{ + conn: conn, + connOpts: connOpts, + token: token, + cursorType: cursorType, + term: term, + opts: opts, + buffer: make([]interface{}, 0), + responses: make([]json.RawMessage, 0), + } + + return cursor +} + +// Cursor is the result of a query. Its cursor starts before the first row +// of the result set. 
A Cursor is not thread safe and should only be accessed +// by a single goroutine at any given time. Use Next to advance through the +// rows: +// +// cursor, err := query.Run(session) +// ... +// defer cursor.Close() +// +// var response interface{} +// for cursor.Next(&response) { +// ... +// } +// err = cursor.Err() // get any error encountered during iteration +// ... +type Cursor struct { + releaseConn func() error + + conn *Connection + connOpts *ConnectOpts + token int64 + cursorType string + term *Term + opts map[string]interface{} + + mu sync.RWMutex + lastErr error + fetching bool + closed bool + finished bool + isAtom bool + isSingleValue bool + pendingSkips int + buffer []interface{} + responses []json.RawMessage + profile interface{} +} + +// Profile returns the information returned from the query profiler. +func (c *Cursor) Profile() interface{} { + if c == nil { + return nil + } + + c.mu.RLock() + defer c.mu.RUnlock() + + return c.profile +} + +// Type returns the cursor type (by default "Cursor") +func (c *Cursor) Type() string { + if c == nil { + return "Cursor" + } + + c.mu.RLock() + defer c.mu.RUnlock() + + return c.cursorType +} + +// Err returns nil if no errors happened during iteration, or the actual +// error otherwise. +func (c *Cursor) Err() error { + if c == nil { + return errNilCursor + } + + c.mu.RLock() + defer c.mu.RUnlock() + + return c.lastErr +} + +// Close closes the cursor, preventing further enumeration. If the end is +// encountered, the cursor is closed automatically. Close is idempotent. 
+func (c *Cursor) Close() error { + if c == nil { + return errNilCursor + } + + c.mu.Lock() + defer c.mu.Unlock() + + var err error + + // If cursor is already closed return immediately + closed := c.closed + if closed { + return nil + } + + // Get connection and check its valid, don't need to lock as this is only + // set when the cursor is created + conn := c.conn + if conn == nil { + return nil + } + if conn.Conn == nil { + return nil + } + + // Stop any unfinished queries + if !c.finished { + q := Query{ + Type: p.Query_STOP, + Token: c.token, + Opts: map[string]interface{}{ + "noreply": true, + }, + } + + _, _, err = conn.Query(q) + } + + if c.releaseConn != nil { + if err := c.releaseConn(); err != nil { + return err + } + } + + c.closed = true + c.conn = nil + c.buffer = nil + c.responses = nil + + return err +} + +// Next retrieves the next document from the result set, blocking if necessary. +// This method will also automatically retrieve another batch of documents from +// the server when the current one is exhausted, or before that in background +// if possible. +// +// Next returns true if a document was successfully unmarshalled onto result, +// and false at the end of the result set or if an error happened. +// When Next returns false, the Err method should be called to verify if +// there was an error during iteration. +// +// Also note that you are able to reuse the same variable multiple times as +// `Next` zeroes the value before scanning in the result. 
+func (c *Cursor) Next(dest interface{}) bool { + if c == nil { + return false + } + + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return false + } + + hasMore, err := c.nextLocked(dest, true) + if c.handleErrorLocked(err) != nil { + c.mu.Unlock() + c.Close() + return false + } + c.mu.Unlock() + + if !hasMore { + c.Close() + } + + return hasMore +} + +func (c *Cursor) nextLocked(dest interface{}, progressCursor bool) (bool, error) { + for { + if err := c.seekCursor(true); err != nil { + return false, err + } + + if c.closed { + return false, nil + } + + if len(c.buffer) == 0 && c.finished { + return false, nil + } + + if len(c.buffer) > 0 { + data := c.buffer[0] + if progressCursor { + c.buffer = c.buffer[1:] + } + + err := encoding.Decode(dest, data) + if err != nil { + return false, err + } + + return true, nil + } + } +} + +// Peek behaves similarly to Next, retreiving the next document from the result set +// and blocking if necessary. Peek, however, does not progress the position of the cursor. +// This can be useful for expressions which can return different types to attempt to +// decode them into different interfaces. +// +// Like Next, it will also automatically retrieve another batch of documents from +// the server when the current one is exhausted, or before that in background +// if possible. +// +// Unlike Next, Peek does not progress the position of the cursor. Peek +// will return errors from decoding, but they will not be persisted in the cursor +// and therefore will not be available on cursor.Err(). This can be useful for +// expressions that can return different types to attempt to decode them into +// different interfaces. +// +// Peek returns true if a document was successfully unmarshalled onto result, +// and false at the end of the result set or if an error happened. 
Peek also +// returns the error (if any) that occured +func (c *Cursor) Peek(dest interface{}) (bool, error) { + if c == nil { + return false, errNilCursor + } + + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return false, nil + } + + hasMore, err := c.nextLocked(dest, false) + if _, isDecodeErr := err.(*encoding.DecodeTypeError); isDecodeErr { + c.mu.Unlock() + return false, err + } + + if c.handleErrorLocked(err) != nil { + c.mu.Unlock() + c.Close() + return false, err + } + c.mu.Unlock() + + return hasMore, nil +} + +// Skip progresses the cursor by one record. It is useful after a successful +// Peek to avoid duplicate decoding work. +func (c *Cursor) Skip() { + if c == nil { + return + } + + c.mu.Lock() + defer c.mu.Unlock() + c.pendingSkips++ +} + +// NextResponse retrieves the next raw response from the result set, blocking if necessary. +// Unlike Next the returned response is the raw JSON document returned from the +// database. +// +// NextResponse returns false (and a nil byte slice) at the end of the result +// set or if an error happened. +func (c *Cursor) NextResponse() ([]byte, bool) { + if c == nil { + return nil, false + } + + c.mu.Lock() + if c.closed { + c.mu.Unlock() + return nil, false + } + + b, hasMore, err := c.nextResponseLocked() + if c.handleErrorLocked(err) != nil { + c.mu.Unlock() + c.Close() + return nil, false + } + c.mu.Unlock() + + if !hasMore { + c.Close() + } + + return b, hasMore +} + +func (c *Cursor) nextResponseLocked() ([]byte, bool, error) { + for { + if err := c.seekCursor(false); err != nil { + return nil, false, err + } + + if len(c.responses) == 0 && c.finished { + return nil, false, nil + } + + if len(c.responses) > 0 { + var response json.RawMessage + response, c.responses = c.responses[0], c.responses[1:] + + return []byte(response), true, nil + } + } +} + +// All retrieves all documents from the result set into the provided slice +// and closes the cursor. 
+// +// The result argument must necessarily be the address for a slice. The slice +// may be nil or previously allocated. +// +// Also note that you are able to reuse the same variable multiple times as +// `All` zeroes the value before scanning in the result. It also attempts +// to reuse the existing slice without allocating any more space by either +// resizing or returning a selection of the slice if necessary. +func (c *Cursor) All(result interface{}) error { + if c == nil { + return errNilCursor + } + + resultv := reflect.ValueOf(result) + if resultv.Kind() != reflect.Ptr || resultv.Elem().Kind() != reflect.Slice { + panic("result argument must be a slice address") + } + slicev := resultv.Elem() + slicev = slicev.Slice(0, slicev.Cap()) + elemt := slicev.Type().Elem() + i := 0 + for { + if slicev.Len() == i { + elemp := reflect.New(elemt) + if !c.Next(elemp.Interface()) { + break + } + slicev = reflect.Append(slicev, elemp.Elem()) + slicev = slicev.Slice(0, slicev.Cap()) + } else { + if !c.Next(slicev.Index(i).Addr().Interface()) { + break + } + } + i++ + } + resultv.Elem().Set(slicev.Slice(0, i)) + + if err := c.Err(); err != nil { + c.Close() + return err + } + + if err := c.Close(); err != nil { + return err + } + + return nil +} + +// One retrieves a single document from the result set into the provided +// slice and closes the cursor. +// +// Also note that you are able to reuse the same variable multiple times as +// `One` zeroes the value before scanning in the result. 
+func (c *Cursor) One(result interface{}) error { + if c == nil { + return errNilCursor + } + + if c.IsNil() { + c.Close() + return ErrEmptyResult + } + + hasResult := c.Next(result) + + if err := c.Err(); err != nil { + c.Close() + return err + } + + if err := c.Close(); err != nil { + return err + } + + if !hasResult { + return ErrEmptyResult + } + + return nil +} + +// Interface retrieves all documents from the result set and returns the data +// as an interface{} and closes the cursor. +// +// If the query returns multiple documents then a slice will be returned, +// otherwise a single value will be returned. +func (c *Cursor) Interface() (interface{}, error) { + if c == nil { + return nil, errNilCursor + } + + var results []interface{} + var result interface{} + for c.Next(&result) { + results = append(results, result) + } + + if err := c.Err(); err != nil { + return nil, err + } + + c.mu.RLock() + isSingleValue := c.isSingleValue + c.mu.RUnlock() + + if isSingleValue { + if len(results) == 0 { + return nil, nil + } + + return results[0], nil + } + + return results, nil +} + +// Listen listens for rows from the database and sends the result onto the given +// channel. The type that the row is scanned into is determined by the element +// type of the channel. +// +// Also note that this function returns immediately. +// +// cursor, err := r.Expr([]int{1,2,3}).Run(session) +// if err != nil { +// panic(err) +// } +// +// ch := make(chan int) +// cursor.Listen(ch) +// <- ch // 1 +// <- ch // 2 +// <- ch // 3 +func (c *Cursor) Listen(channel interface{}) { + go func() { + channelv := reflect.ValueOf(channel) + if channelv.Kind() != reflect.Chan { + panic("input argument must be a channel") + } + elemt := channelv.Type().Elem() + for { + elemp := reflect.New(elemt) + if !c.Next(elemp.Interface()) { + break + } + + channelv.Send(elemp.Elem()) + } + + c.Close() + channelv.Close() + }() +} + +// IsNil tests if the current row is nil. 
+func (c *Cursor) IsNil() bool { + if c == nil { + return true + } + + c.mu.RLock() + defer c.mu.RUnlock() + + if len(c.buffer) > 0 { + return c.buffer[0] == nil + } + + if len(c.responses) > 0 { + response := c.responses[0] + if response == nil { + return true + } + + if string(response) == "null" { + return true + } + + return false + } + + return true +} + +// fetchMore fetches more rows from the database. +// +// If wait is true then it will wait for the database to reply otherwise it +// will return after sending the continue query. +func (c *Cursor) fetchMore() error { + var err error + + if !c.fetching { + c.fetching = true + + if c.closed { + return errCursorClosed + } + + q := Query{ + Type: p.Query_CONTINUE, + Token: c.token, + } + + c.mu.Unlock() + _, _, err = c.conn.Query(q) + c.mu.Lock() + } + + return err +} + +// handleError sets the value of lastErr to err if lastErr is not yet set. +func (c *Cursor) handleError(err error) error { + c.mu.Lock() + defer c.mu.Unlock() + + return c.handleErrorLocked(err) +} + +func (c *Cursor) handleErrorLocked(err error) error { + if c.lastErr == nil { + c.lastErr = err + } + + return c.lastErr +} + +// extend adds the result of a continue query to the cursor. +func (c *Cursor) extend(response *Response) { + c.mu.Lock() + defer c.mu.Unlock() + + c.extendLocked(response) +} + +func (c *Cursor) extendLocked(response *Response) { + c.responses = append(c.responses, response.Responses...) 
+ c.finished = response.Type != p.Response_SUCCESS_PARTIAL + c.fetching = false + c.isAtom = response.Type == p.Response_SUCCESS_ATOM + + putResponse(response) +} + +// seekCursor takes care of loading more data if needed and applying pending skips +// +// bufferResponse determines whether the response will be parsed into the buffer +func (c *Cursor) seekCursor(bufferResponse bool) error { + if c.lastErr != nil { + return c.lastErr + } + + if len(c.buffer) == 0 && len(c.responses) == 0 && c.closed { + return errCursorClosed + } + + // Loop over loading data, applying skips as necessary and loading more data as needed + // until either the cursor is closed or finished, or we have applied all outstanding + // skips and data is available + for { + c.applyPendingSkips(bufferResponse) // if we are buffering the responses, skip can drain from the buffer + + if bufferResponse && len(c.buffer) == 0 && len(c.responses) > 0 { + if err := c.bufferNextResponse(); err != nil { + return err + } + continue // go around the loop again to re-apply pending skips + } else if len(c.buffer) == 0 && len(c.responses) == 0 && !c.finished { + // We skipped all of our data, load some more + if err := c.fetchMore(); err != nil { + return err + } + if c.closed { + return nil + } + continue // go around the loop again to re-apply pending skips + } + return nil + } +} + +// applyPendingSkips applies all pending skips to the buffer and +// returns whether there are more pending skips to be applied +// +// if drainFromBuffer is true, we will drain from the buffer, otherwise +// we drain from the responses +func (c *Cursor) applyPendingSkips(drainFromBuffer bool) (stillPending bool) { + if c.pendingSkips == 0 { + return false + } + + if drainFromBuffer { + if len(c.buffer) > c.pendingSkips { + c.buffer = c.buffer[c.pendingSkips:] + c.pendingSkips = 0 + return false + } + + c.pendingSkips -= len(c.buffer) + c.buffer = c.buffer[:0] + return c.pendingSkips > 0 + } + + if len(c.responses) > 
c.pendingSkips { + c.responses = c.responses[c.pendingSkips:] + c.pendingSkips = 0 + return false + } + + c.pendingSkips -= len(c.responses) + c.responses = c.responses[:0] + return c.pendingSkips > 0 +} + +// bufferResponse reads a single response and stores the result into the buffer +// if the response is from an atomic response, it will check if the +// response contains multiple records and store them all into the buffer +func (c *Cursor) bufferNextResponse() error { + if c.closed { + return errCursorClosed + } + // If there are no responses, nothing to do + if len(c.responses) == 0 { + return nil + } + + response := c.responses[0] + c.responses = c.responses[1:] + + var value interface{} + decoder := json.NewDecoder(bytes.NewBuffer(response)) + if c.connOpts.UseJSONNumber { + decoder.UseNumber() + } + err := decoder.Decode(&value) + if err != nil { + return err + } + + value, err = recursivelyConvertPseudotype(value, c.opts) + if err != nil { + return err + } + + // If response is an ATOM then try and convert to an array + if data, ok := value.([]interface{}); ok && c.isAtom { + c.buffer = append(c.buffer, data...) 
+ } else if value == nil { + c.buffer = append(c.buffer, nil) + } else { + c.buffer = append(c.buffer, value) + + // If this is the only value in the response and the response was an + // atom then set the single value flag + if c.isAtom { + c.isSingleValue = true + } + } + return nil +} diff --git a/vendor/github.com/GoRethink/gorethink/doc.go b/vendor/github.com/GoRethink/gorethink/doc.go new file mode 100644 index 0000000..f48cf88 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/doc.go @@ -0,0 +1,6 @@ +// Package gorethink implements a Go driver for RethinkDB +// +// Current version: v3.0.0 (RethinkDB v2.3) +// For more in depth information on how to use RethinkDB check out the API docs +// at http://rethinkdb.com/api +package gorethink diff --git a/vendor/github.com/GoRethink/gorethink/errors.go b/vendor/github.com/GoRethink/gorethink/errors.go new file mode 100644 index 0000000..6a257e6 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/errors.go @@ -0,0 +1,182 @@ +package gorethink + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "strings" + + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +var ( + // ErrNoHosts is returned when no hosts to the Connect method. + ErrNoHosts = errors.New("no hosts provided") + // ErrNoConnectionsStarted is returned when the driver couldn't to any of + // the provided hosts. + ErrNoConnectionsStarted = errors.New("no connections were made when creating the session") + // ErrInvalidNode is returned when attempting to connect to a node which + // returns an invalid response. + ErrInvalidNode = errors.New("invalid node") + // ErrNoConnections is returned when there are no active connections in the + // clusters connection pool. + ErrNoConnections = errors.New("gorethink: no connections were available") + // ErrConnectionClosed is returned when trying to send a query with a closed + // connection. 
+ ErrConnectionClosed = errors.New("gorethink: the connection is closed") +) + +func printCarrots(t Term, frames []*p.Frame) string { + var frame *p.Frame + if len(frames) > 1 { + frame, frames = frames[0], frames[1:] + } else if len(frames) == 1 { + frame, frames = frames[0], []*p.Frame{} + } + + for i, arg := range t.args { + if frame.GetPos() == int64(i) { + t.args[i] = Term{ + termType: p.Term_DATUM, + data: printCarrots(arg, frames), + } + } + } + + for k, arg := range t.optArgs { + if frame.GetOpt() == k { + t.optArgs[k] = Term{ + termType: p.Term_DATUM, + data: printCarrots(arg, frames), + } + } + } + + b := &bytes.Buffer{} + for _, c := range t.String() { + if c != '^' { + b.WriteString(" ") + } else { + b.WriteString("^") + } + } + + return b.String() +} + +// Error constants +var ErrEmptyResult = errors.New("The result does not contain any more rows") + +// Connection/Response errors + +// rqlResponseError is the base type for all errors, it formats both +// for the response and query if set. 
+type rqlServerError struct { + response *Response + term *Term +} + +func (e rqlServerError) Error() string { + var err = "An error occurred" + if e.response != nil { + json.Unmarshal(e.response.Responses[0], &err) + } + + if e.term == nil { + return fmt.Sprintf("gorethink: %s", err) + } + + return fmt.Sprintf("gorethink: %s in:\n%s", err, e.term.String()) + +} + +func (e rqlServerError) String() string { + return e.Error() +} + +type rqlError string + +func (e rqlError) Error() string { + return fmt.Sprintf("gorethink: %s", string(e)) +} + +func (e rqlError) String() string { + return e.Error() +} + +// Exported Error "Implementations" + +type RQLClientError struct{ rqlServerError } +type RQLCompileError struct{ rqlServerError } +type RQLDriverCompileError struct{ RQLCompileError } +type RQLServerCompileError struct{ RQLCompileError } +type RQLAuthError struct{ RQLDriverError } +type RQLRuntimeError struct{ rqlServerError } + +type RQLQueryLogicError struct{ RQLRuntimeError } +type RQLNonExistenceError struct{ RQLQueryLogicError } +type RQLResourceLimitError struct{ RQLRuntimeError } +type RQLUserError struct{ RQLRuntimeError } +type RQLInternalError struct{ RQLRuntimeError } +type RQLTimeoutError struct{ rqlServerError } +type RQLAvailabilityError struct{ RQLRuntimeError } +type RQLOpFailedError struct{ RQLAvailabilityError } +type RQLOpIndeterminateError struct{ RQLAvailabilityError } + +// RQLDriverError represents an unexpected error with the driver, if this error +// persists please create an issue. +type RQLDriverError struct { + rqlError +} + +// RQLConnectionError represents an error when communicating with the database +// server. 
+type RQLConnectionError struct { + rqlError +} + +func createRuntimeError(errorType p.Response_ErrorType, response *Response, term *Term) error { + serverErr := rqlServerError{response, term} + + switch errorType { + case p.Response_QUERY_LOGIC: + return RQLQueryLogicError{RQLRuntimeError{serverErr}} + case p.Response_NON_EXISTENCE: + return RQLNonExistenceError{RQLQueryLogicError{RQLRuntimeError{serverErr}}} + case p.Response_RESOURCE_LIMIT: + return RQLResourceLimitError{RQLRuntimeError{serverErr}} + case p.Response_USER: + return RQLUserError{RQLRuntimeError{serverErr}} + case p.Response_INTERNAL: + return RQLInternalError{RQLRuntimeError{serverErr}} + case p.Response_OP_FAILED: + return RQLOpFailedError{RQLAvailabilityError{RQLRuntimeError{serverErr}}} + case p.Response_OP_INDETERMINATE: + return RQLOpIndeterminateError{RQLAvailabilityError{RQLRuntimeError{serverErr}}} + default: + return RQLRuntimeError{serverErr} + } +} + +// Error type helpers + +// IsConflictErr returns true if the error is non-nil and the query failed +// due to a duplicate primary key. +func IsConflictErr(err error) bool { + if err == nil { + return false + } + + return strings.HasPrefix(err.Error(), "Duplicate primary key") +} + +// IsTypeErr returns true if the error is non-nil and the query failed due +// to a type error. 
+func IsTypeErr(err error) bool { + if err == nil { + return false + } + + return strings.HasPrefix(err.Error(), "Expected type") +} diff --git a/vendor/github.com/GoRethink/gorethink/gorethink.go b/vendor/github.com/GoRethink/gorethink/gorethink.go new file mode 100644 index 0000000..92e5ac4 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/gorethink.go @@ -0,0 +1,58 @@ +package gorethink + +import ( + "io/ioutil" + "reflect" + + "github.com/Sirupsen/logrus" + + "gopkg.in/gorethink/gorethink.v2/encoding" +) + +var ( + Log *logrus.Logger +) + +const ( + SystemDatabase = "rethinkdb" + + TableConfigSystemTable = "table_config" + ServerConfigSystemTable = "server_config" + DBConfigSystemTable = "db_config" + ClusterConfigSystemTable = "cluster_config" + TableStatusSystemTable = "table_status" + ServerStatusSystemTable = "server_status" + CurrentIssuesSystemTable = "current_issues" + UsersSystemTable = "users" + PermissionsSystemTable = "permissions" + JobsSystemTable = "jobs" + StatsSystemTable = "stats" + LogsSystemTable = "logs" +) + +func init() { + // Set encoding package + encoding.IgnoreType(reflect.TypeOf(Term{})) + + Log = logrus.New() + Log.Out = ioutil.Discard // By default don't log anything +} + +// SetVerbose allows the driver logging level to be set. If true is passed then +// the log level is set to Debug otherwise it defaults to Info. +func SetVerbose(verbose bool) { + if verbose { + Log.Level = logrus.DebugLevel + return + } + + Log.Level = logrus.InfoLevel +} + +// SetTags allows you to override the tags used when decoding or encoding +// structs. The driver will check for the tags in the same order that they were +// passed into this function. 
If no parameters are passed then the driver will +// default to checking for the gorethink tag (the gorethink tag is always included) +func SetTags(tags ...string) { + encoding.Tags = append(tags, "gorethink") +} diff --git a/vendor/github.com/GoRethink/gorethink/host.go b/vendor/github.com/GoRethink/gorethink/host.go new file mode 100644 index 0000000..44228eb --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/host.go @@ -0,0 +1,24 @@ +package gorethink + +import ( + "fmt" +) + +// Host name and port of server +type Host struct { + Name string + Port int +} + +// NewHost create a new Host +func NewHost(name string, port int) Host { + return Host{ + Name: name, + Port: port, + } +} + +// Returns host address (name:port) +func (h Host) String() string { + return fmt.Sprintf("%s:%d", h.Name, h.Port) +} diff --git a/vendor/github.com/GoRethink/gorethink/mock.go b/vendor/github.com/GoRethink/gorethink/mock.go new file mode 100644 index 0000000..b11d4e8 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/mock.go @@ -0,0 +1,394 @@ +package gorethink + +import ( + "encoding/json" + "fmt" + "reflect" + "sync" + "time" + + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// Mocking is based on the amazing package github.com/stretchr/testify + +// testingT is an interface wrapper around *testing.T +type testingT interface { + Logf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + FailNow() +} + +// MockAnything can be used in place of any term, this is useful when you want +// mock similar queries or queries that you don't quite know the exact structure +// of. 
+func MockAnything() Term { + t := constructRootTerm("MockAnything", p.Term_DATUM, nil, nil) + t.isMockAnything = true + + return t +} + +func (t Term) MockAnything() Term { + t = constructMethodTerm(t, "MockAnything", p.Term_DATUM, nil, nil) + t.isMockAnything = true + + return t +} + +// MockQuery represents a mocked query and is used for setting expectations, +// as well as recording activity. +type MockQuery struct { + parent *Mock + + // Holds the query and term + Query Query + + // Holds the JSON representation of query + BuiltQuery []byte + + // Holds the response that should be returned when this method is executed. + Response interface{} + + // Holds the error that should be returned when this method is executed. + Error error + + // The number of times to return the return arguments when setting + // expectations. 0 means to always return the value. + Repeatability int + + // Holds a channel that will be used to block the Return until it either + // recieves a message or is closed. nil means it returns immediately. + WaitFor <-chan time.Time + + // Amount of times this query has been executed + executed int +} + +func newMockQuery(parent *Mock, q Query) *MockQuery { + // Build and marshal term + builtQuery, err := json.Marshal(q.Build()) + if err != nil { + panic(fmt.Sprintf("Failed to build query: %s", err)) + } + + return &MockQuery{ + parent: parent, + Query: q, + BuiltQuery: builtQuery, + Response: make([]interface{}, 0), + Repeatability: 0, + WaitFor: nil, + } +} + +func newMockQueryFromTerm(parent *Mock, t Term, opts map[string]interface{}) *MockQuery { + q, err := parent.newQuery(t, opts) + if err != nil { + panic(fmt.Sprintf("Failed to build query: %s", err)) + } + + return newMockQuery(parent, q) +} + +func (mq *MockQuery) lock() { + mq.parent.mu.Lock() +} + +func (mq *MockQuery) unlock() { + mq.parent.mu.Unlock() +} + +// Return specifies the return arguments for the expectation. 
+// +// mock.On(r.Table("test")).Return(nil, errors.New("failed")) +func (mq *MockQuery) Return(response interface{}, err error) *MockQuery { + mq.lock() + defer mq.unlock() + + mq.Response = response + mq.Error = err + + return mq +} + +// Once indicates that that the mock should only return the value once. +// +// mock.On(r.Table("test")).Return(result, nil).Once() +func (mq *MockQuery) Once() *MockQuery { + return mq.Times(1) +} + +// Twice indicates that that the mock should only return the value twice. +// +// mock.On(r.Table("test")).Return(result, nil).Twice() +func (mq *MockQuery) Twice() *MockQuery { + return mq.Times(2) +} + +// Times indicates that that the mock should only return the indicated number +// of times. +// +// mock.On(r.Table("test")).Return(result, nil).Times(5) +func (mq *MockQuery) Times(i int) *MockQuery { + mq.lock() + defer mq.unlock() + mq.Repeatability = i + return mq +} + +// WaitUntil sets the channel that will block the mock's return until its closed +// or a message is received. +// +// mock.On(r.Table("test")).WaitUntil(time.After(time.Second)) +func (mq *MockQuery) WaitUntil(w <-chan time.Time) *MockQuery { + mq.lock() + defer mq.unlock() + mq.WaitFor = w + return mq +} + +// After sets how long to block until the query returns +// +// mock.On(r.Table("test")).After(time.Second) +func (mq *MockQuery) After(d time.Duration) *MockQuery { + return mq.WaitUntil(time.After(d)) +} + +// On chains a new expectation description onto the mocked interface. This +// allows syntax like. +// +// Mock. +// On(r.Table("test")).Return(result, nil). +// On(r.Table("test2")).Return(nil, errors.New("Some Error")) +func (mq *MockQuery) On(t Term) *MockQuery { + return mq.parent.On(t) +} + +// Mock is used to mock query execution and verify that the expected queries are +// being executed. Mocks are used by creating an instance using NewMock and then +// passing this when running your queries instead of a session. 
For example: +// +// mock := r.NewMock() +// mock.On(r.Table("test")).Return([]interface{}{data}, nil) +// +// cursor, err := r.Table("test").Run(mock) +// +// mock.AssertExpectations(t) +type Mock struct { + mu sync.Mutex + opts ConnectOpts + + ExpectedQueries []*MockQuery + Queries []MockQuery +} + +// NewMock creates an instance of Mock, you can optionally pass ConnectOpts to +// the function, if passed any mocked query will be generated using those +// options. +func NewMock(opts ...ConnectOpts) *Mock { + m := &Mock{ + ExpectedQueries: make([]*MockQuery, 0), + Queries: make([]MockQuery, 0), + } + + if len(opts) > 0 { + m.opts = opts[0] + } + + return m +} + +// On starts a description of an expectation of the specified query +// being executed. +// +// mock.On(r.Table("test")) +func (m *Mock) On(t Term, opts ...map[string]interface{}) *MockQuery { + var qopts map[string]interface{} + if len(opts) > 0 { + qopts = opts[0] + } + + m.mu.Lock() + defer m.mu.Unlock() + mq := newMockQueryFromTerm(m, t, qopts) + m.ExpectedQueries = append(m.ExpectedQueries, mq) + return mq +} + +// AssertExpectations asserts that everything specified with On and Return was +// in fact executed as expected. Queries may have been executed in any order. 
+func (m *Mock) AssertExpectations(t testingT) bool { + var somethingMissing bool + var failedExpectations int + + // iterate through each expectation + expectedQueries := m.expectedQueries() + for _, expectedQuery := range expectedQueries { + if !m.queryWasExecuted(expectedQuery) && expectedQuery.executed == 0 { + somethingMissing = true + failedExpectations++ + t.Logf("❌\t%s", expectedQuery.Query.Term.String()) + } else { + m.mu.Lock() + if expectedQuery.Repeatability > 0 { + somethingMissing = true + failedExpectations++ + } else { + t.Logf("✅\t%s", expectedQuery.Query.Term.String()) + } + m.mu.Unlock() + } + } + + if somethingMissing { + t.Errorf("FAIL: %d out of %d expectation(s) were met.\n\tThe query you are testing needs to be executed %d more times(s).", len(expectedQueries)-failedExpectations, len(expectedQueries), failedExpectations) + } + + return !somethingMissing +} + +// AssertNumberOfExecutions asserts that the query was executed expectedExecutions times. +func (m *Mock) AssertNumberOfExecutions(t testingT, expectedQuery *MockQuery, expectedExecutions int) bool { + var actualExecutions int + for _, query := range m.queries() { + if query.Query.Term.compare(*expectedQuery.Query.Term, map[int64]int64{}) && query.Repeatability > -1 { + // if bytes.Equal(query.BuiltQuery, expectedQuery.BuiltQuery) { + actualExecutions++ + } + } + + if expectedExecutions != actualExecutions { + t.Errorf("Expected number of executions (%d) does not match the actual number of executions (%d).", expectedExecutions, actualExecutions) + return false + } + + return true +} + +// AssertExecuted asserts that the method was executed. +// It can produce a false result when an argument is a pointer type and the underlying value changed after executing the mocked method. 
+func (m *Mock) AssertExecuted(t testingT, expectedQuery *MockQuery) bool { + if !m.queryWasExecuted(expectedQuery) { + t.Errorf("The query \"%s\" should have been executed, but was not.", expectedQuery.Query.Term.String()) + return false + } + return true +} + +// AssertNotExecuted asserts that the method was not executed. +// It can produce a false result when an argument is a pointer type and the underlying value changed after executing the mocked method. +func (m *Mock) AssertNotExecuted(t testingT, expectedQuery *MockQuery) bool { + if m.queryWasExecuted(expectedQuery) { + t.Errorf("The query \"%s\" was executed, but should NOT have been.", expectedQuery.Query.Term.String()) + return false + } + return true +} + +func (m *Mock) IsConnected() bool { + return true +} + +func (m *Mock) Query(q Query) (*Cursor, error) { + found, query := m.findExpectedQuery(q) + + if found < 0 { + panic(fmt.Sprintf("gorethink: mock: This query was unexpected:\n\t\t%s", q.Term.String())) + } else { + m.mu.Lock() + switch { + case query.Repeatability == 1: + query.Repeatability = -1 + query.executed++ + + case query.Repeatability > 1: + query.Repeatability-- + query.executed++ + + case query.Repeatability == 0: + query.executed++ + } + m.mu.Unlock() + } + + // add the query + m.mu.Lock() + m.Queries = append(m.Queries, *newMockQuery(m, q)) + m.mu.Unlock() + + // block if specified + if query.WaitFor != nil { + <-query.WaitFor + } + + // Return error without building cursor if non-nil + if query.Error != nil { + return nil, query.Error + } + + // Build cursor and return + c := newCursor(nil, "", query.Query.Token, query.Query.Term, query.Query.Opts) + c.finished = true + c.fetching = false + c.isAtom = true + + responseVal := reflect.ValueOf(query.Response) + if responseVal.Kind() == reflect.Slice || responseVal.Kind() == reflect.Array { + for i := 0; i < responseVal.Len(); i++ { + c.buffer = append(c.buffer, responseVal.Index(i).Interface()) + } + } else { + c.buffer = 
append(c.buffer, query.Response) + } + + return c, nil +} + +func (m *Mock) Exec(q Query) error { + _, err := m.Query(q) + + return err +} + +func (m *Mock) newQuery(t Term, opts map[string]interface{}) (Query, error) { + return newQuery(t, opts, &m.opts) +} + +func (m *Mock) findExpectedQuery(q Query) (int, *MockQuery) { + m.mu.Lock() + defer m.mu.Unlock() + + for i, query := range m.ExpectedQueries { + // if bytes.Equal(query.BuiltQuery, builtQuery) && query.Repeatability > -1 { + if query.Query.Term.compare(*q.Term, map[int64]int64{}) && query.Repeatability > -1 { + return i, query + } + } + + return -1, nil +} + +func (m *Mock) queryWasExecuted(expectedQuery *MockQuery) bool { + for _, query := range m.queries() { + if query.Query.Term.compare(*expectedQuery.Query.Term, map[int64]int64{}) { + // if bytes.Equal(query.BuiltQuery, expectedQuery.BuiltQuery) { + return true + } + } + + // we didn't find the expected query + return false +} + +func (m *Mock) expectedQueries() []*MockQuery { + m.mu.Lock() + defer m.mu.Unlock() + return append([]*MockQuery{}, m.ExpectedQueries...) +} + +func (m *Mock) queries() []MockQuery { + m.mu.Lock() + defer m.mu.Unlock() + return append([]MockQuery{}, m.Queries...) 
+} diff --git a/vendor/github.com/GoRethink/gorethink/node.go b/vendor/github.com/GoRethink/gorethink/node.go new file mode 100644 index 0000000..3178147 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/node.go @@ -0,0 +1,133 @@ +package gorethink + +import ( + "sync" + + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// Node represents a database server in the cluster +type Node struct { + ID string + Host Host + aliases []Host + + cluster *Cluster + pool *Pool + + mu sync.RWMutex + closed bool +} + +func newNode(id string, aliases []Host, cluster *Cluster, pool *Pool) *Node { + node := &Node{ + ID: id, + Host: aliases[0], + aliases: aliases, + cluster: cluster, + pool: pool, + } + + return node +} + +// Closed returns true if the node is closed +func (n *Node) Closed() bool { + n.mu.RLock() + defer n.mu.RUnlock() + + return n.closed +} + +// Close closes the session +func (n *Node) Close(optArgs ...CloseOpts) error { + n.mu.Lock() + defer n.mu.Unlock() + + if n.closed { + return nil + } + + if len(optArgs) >= 1 { + if optArgs[0].NoReplyWait { + n.NoReplyWait() + } + } + + if n.pool != nil { + n.pool.Close() + } + n.pool = nil + n.closed = true + + return nil +} + +// SetInitialPoolCap sets the initial capacity of the connection pool. +func (n *Node) SetInitialPoolCap(idleConns int) { + n.pool.SetInitialPoolCap(idleConns) +} + +// SetMaxIdleConns sets the maximum number of connections in the idle +// connection pool. +func (n *Node) SetMaxIdleConns(idleConns int) { + n.pool.SetMaxIdleConns(idleConns) +} + +// SetMaxOpenConns sets the maximum number of open connections to the database. +func (n *Node) SetMaxOpenConns(openConns int) { + n.pool.SetMaxOpenConns(openConns) +} + +// NoReplyWait ensures that previous queries with the noreply flag have been +// processed by the server. 
Note that this guarantee only applies to queries +// run on the given connection +func (n *Node) NoReplyWait() error { + return n.pool.Exec(Query{ + Type: p.Query_NOREPLY_WAIT, + }) +} + +// Query executes a ReQL query using this nodes connection pool. +func (n *Node) Query(q Query) (cursor *Cursor, err error) { + if n.Closed() { + return nil, ErrInvalidNode + } + + return n.pool.Query(q) +} + +// Exec executes a ReQL query using this nodes connection pool. +func (n *Node) Exec(q Query) (err error) { + if n.Closed() { + return ErrInvalidNode + } + + return n.pool.Exec(q) +} + +// Server returns the server name and server UUID being used by a connection. +func (n *Node) Server() (ServerResponse, error) { + var response ServerResponse + + if n.Closed() { + return response, ErrInvalidNode + } + + return n.pool.Server() +} + +type nodeStatus struct { + ID string `gorethink:"id"` + Name string `gorethink:"name"` + Status string `gorethink:"status"` + Network struct { + Hostname string `gorethink:"hostname"` + ClusterPort int64 `gorethink:"cluster_port"` + ReqlPort int64 `gorethink:"reql_port"` + CanonicalAddresses []struct { + Host string `gorethink:"host"` + Port int64 `gorethink:"port"` + } `gorethink:"canonical_addresses"` + } `gorethink:"network"` +} diff --git a/vendor/github.com/GoRethink/gorethink/pool.go b/vendor/github.com/GoRethink/gorethink/pool.go new file mode 100644 index 0000000..1a7bfa7 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/pool.go @@ -0,0 +1,200 @@ +package gorethink + +import ( + "errors" + "fmt" + "net" + "sync" + + "gopkg.in/fatih/pool.v2" +) + +var ( + errPoolClosed = errors.New("gorethink: pool is closed") +) + +// A Pool is used to store a pool of connections to a single RethinkDB server +type Pool struct { + host Host + opts *ConnectOpts + + pool pool.Pool + + mu sync.RWMutex // protects following fields + closed bool +} + +// NewPool creates a new connection pool for the given host +func NewPool(host Host, opts *ConnectOpts) 
(*Pool, error) { + initialCap := opts.InitialCap + if initialCap <= 0 { + // Fallback to MaxIdle if InitialCap is zero, this should be removed + // when MaxIdle is removed + initialCap = opts.MaxIdle + } + + maxOpen := opts.MaxOpen + if maxOpen <= 0 { + maxOpen = 2 + } + + p, err := pool.NewChannelPool(initialCap, maxOpen, func() (net.Conn, error) { + conn, err := NewConnection(host.String(), opts) + if err != nil { + return nil, err + } + + return conn, err + }) + if err != nil { + return nil, err + } + + return &Pool{ + pool: p, + host: host, + opts: opts, + }, nil +} + +// Ping verifies a connection to the database is still alive, +// establishing a connection if necessary. +func (p *Pool) Ping() error { + _, pc, err := p.conn() + if err != nil { + return err + } + return pc.Close() +} + +// Close closes the database, releasing any open resources. +// +// It is rare to Close a Pool, as the Pool handle is meant to be +// long-lived and shared between many goroutines. +func (p *Pool) Close() error { + p.mu.RLock() + defer p.mu.RUnlock() + if p.closed { + return nil + } + + p.pool.Close() + + return nil +} + +func (p *Pool) conn() (*Connection, *pool.PoolConn, error) { + p.mu.RLock() + defer p.mu.RUnlock() + + if p.closed { + return nil, nil, errPoolClosed + } + + nc, err := p.pool.Get() + if err != nil { + return nil, nil, err + } + + pc, ok := nc.(*pool.PoolConn) + if !ok { + // This should never happen! + return nil, nil, fmt.Errorf("Invalid connection in pool") + } + + conn, ok := pc.Conn.(*Connection) + if !ok { + // This should never happen! + return nil, nil, fmt.Errorf("Invalid connection in pool") + } + + return conn, pc, nil +} + +// SetInitialPoolCap sets the initial capacity of the connection pool. +// +// Deprecated: This value should only be set when connecting +func (p *Pool) SetInitialPoolCap(n int) { + return +} + +// SetMaxIdleConns sets the maximum number of connections in the idle +// connection pool. 
+// +// Deprecated: This value should only be set when connecting +func (p *Pool) SetMaxIdleConns(n int) { + return +} + +// SetMaxOpenConns sets the maximum number of open connections to the database. +// +// Deprecated: This value should only be set when connecting +func (p *Pool) SetMaxOpenConns(n int) { + return +} + +// Query execution functions + +// Exec executes a query without waiting for any response. +func (p *Pool) Exec(q Query) error { + c, pc, err := p.conn() + if err != nil { + return err + } + defer pc.Close() + + _, _, err = c.Query(q) + + if c.isBad() { + pc.MarkUnusable() + } + + return err +} + +// Query executes a query and waits for the response +func (p *Pool) Query(q Query) (*Cursor, error) { + c, pc, err := p.conn() + if err != nil { + return nil, err + } + + _, cursor, err := c.Query(q) + + if err == nil { + cursor.releaseConn = releaseConn(c, pc) + } else if c.isBad() { + pc.MarkUnusable() + } + + return cursor, err +} + +// Server returns the server name and server UUID being used by a connection. 
+func (p *Pool) Server() (ServerResponse, error) {
+	var response ServerResponse
+
+	c, pc, err := p.conn()
+	if err != nil {
+		return response, err
+	}
+	defer pc.Close()
+
+	response, err = c.Server()
+
+	if c.isBad() {
+		pc.MarkUnusable()
+	}
+
+	return response, err
+}
+
+func releaseConn(c *Connection, pc *pool.PoolConn) func() error {
+	return func() error {
+		if c.isBad() {
+			pc.MarkUnusable()
+		}
+
+		return pc.Close()
+	}
+}
diff --git a/vendor/github.com/GoRethink/gorethink/pseudotypes.go b/vendor/github.com/GoRethink/gorethink/pseudotypes.go
new file mode 100644
index 0000000..22a748e
--- /dev/null
+++ b/vendor/github.com/GoRethink/gorethink/pseudotypes.go
@@ -0,0 +1,235 @@
+package gorethink
+
+import (
+	"encoding/base64"
+	"math"
+	"strconv"
+	"time"
+
+	"gopkg.in/gorethink/gorethink.v2/types"
+
+	"fmt"
+)
+
+func convertPseudotype(obj map[string]interface{}, opts map[string]interface{}) (interface{}, error) {
+	if reqlType, ok := obj["$reql_type$"]; ok {
+		if reqlType == "TIME" {
+			// load timeFormat, set to native if the option was not set
+			timeFormat := "native"
+			if opt, ok := opts["time_format"]; ok {
+				if sopt, ok := opt.(string); ok {
+					timeFormat = sopt
+				} else {
+					return nil, fmt.Errorf("Invalid time_format run option \"%s\".", opt)
+				}
+			}
+
+			if timeFormat == "native" {
+				return reqlTimeToNativeTime(obj["epoch_time"].(float64), obj["timezone"].(string))
+			} else if timeFormat == "raw" {
+				return obj, nil
+			} else {
+				return nil, fmt.Errorf("Unknown time_format run option \"%s\".", timeFormat)
+			}
+		} else if reqlType == "GROUPED_DATA" {
+			// load groupFormat, set to native if the option was not set
+			groupFormat := "native"
+			if opt, ok := opts["group_format"]; ok {
+				if sopt, ok := opt.(string); ok {
+					groupFormat = sopt
+				} else {
+					return nil, fmt.Errorf("Invalid group_format run option \"%s\".", opt)
+				}
+			}
+
+			if groupFormat == "native" || groupFormat == "slice" {
+				return reqlGroupedDataToSlice(obj)
+			} else if groupFormat == "map" {
+ return reqlGroupedDataToMap(obj) + } else if groupFormat == "raw" { + return obj, nil + } else { + return nil, fmt.Errorf("Unknown group_format run option \"%s\".", reqlType) + } + } else if reqlType == "BINARY" { + binaryFormat := "native" + if opt, ok := opts["binary_format"]; ok { + if sopt, ok := opt.(string); ok { + binaryFormat = sopt + } else { + return nil, fmt.Errorf("Invalid binary_format run option \"%s\".", opt) + } + } + + if binaryFormat == "native" { + return reqlBinaryToNativeBytes(obj) + } else if binaryFormat == "raw" { + return obj, nil + } else { + return nil, fmt.Errorf("Unknown binary_format run option \"%s\".", reqlType) + } + } else if reqlType == "GEOMETRY" { + geometryFormat := "native" + if opt, ok := opts["geometry_format"]; ok { + if sopt, ok := opt.(string); ok { + geometryFormat = sopt + } else { + return nil, fmt.Errorf("Invalid geometry_format run option \"%s\".", opt) + } + } + + if geometryFormat == "native" { + return reqlGeometryToNativeGeometry(obj) + } else if geometryFormat == "raw" { + return obj, nil + } else { + return nil, fmt.Errorf("Unknown geometry_format run option \"%s\".", reqlType) + } + } else { + return obj, nil + } + } + + return obj, nil +} + +func recursivelyConvertPseudotype(obj interface{}, opts map[string]interface{}) (interface{}, error) { + var err error + + switch obj := obj.(type) { + case []interface{}: + for key, val := range obj { + obj[key], err = recursivelyConvertPseudotype(val, opts) + if err != nil { + return nil, err + } + } + case map[string]interface{}: + for key, val := range obj { + obj[key], err = recursivelyConvertPseudotype(val, opts) + if err != nil { + return nil, err + } + } + + pobj, err := convertPseudotype(obj, opts) + if err != nil { + return nil, err + } + + return pobj, nil + } + + return obj, nil +} + +// Pseudo-type helper functions + +func reqlTimeToNativeTime(timestamp float64, timezone string) (time.Time, error) { + sec, ms := math.Modf(timestamp) + + // Convert to native 
time rounding to milliseconds
+	t := time.Unix(int64(sec), int64(math.Floor(ms*1000+0.5))*1000*1000)
+
+	// Calculate the timezone
+	if timezone != "" {
+		hours, err := strconv.Atoi(timezone[1:3])
+		if err != nil {
+			return time.Time{}, err
+		}
+		minutes, err := strconv.Atoi(timezone[4:6])
+		if err != nil {
+			return time.Time{}, err
+		}
+		tzOffset := ((hours * 60) + minutes) * 60
+		if timezone[:1] == "-" {
+			tzOffset = 0 - tzOffset
+		}
+
+		t = t.In(time.FixedZone(timezone, tzOffset))
+	}
+
+	return t, nil
+}
+
+func reqlGroupedDataToSlice(obj map[string]interface{}) (interface{}, error) {
+	if data, ok := obj["data"]; ok {
+		ret := []interface{}{}
+		for _, v := range data.([]interface{}) {
+			v := v.([]interface{})
+			ret = append(ret, map[string]interface{}{
+				"group":     v[0],
+				"reduction": v[1],
+			})
+		}
+		return ret, nil
+	}
+	return nil, fmt.Errorf("pseudo-type GROUPED_DATA object %v does not have the expected field \"data\"", obj)
+}
+
+func reqlGroupedDataToMap(obj map[string]interface{}) (interface{}, error) {
+	if data, ok := obj["data"]; ok {
+		ret := map[interface{}]interface{}{}
+		for _, v := range data.([]interface{}) {
+			v := v.([]interface{})
+			ret[v[0]] = v[1]
+		}
+		return ret, nil
+	}
+	return nil, fmt.Errorf("pseudo-type GROUPED_DATA object %v does not have the expected field \"data\"", obj)
+}
+
+func reqlBinaryToNativeBytes(obj map[string]interface{}) (interface{}, error) {
+	if data, ok := obj["data"]; ok {
+		if data, ok := data.(string); ok {
+			b, err := base64.StdEncoding.DecodeString(data)
+			if err != nil {
+				return nil, fmt.Errorf("error decoding pseudo-type BINARY object %v", obj)
+			}
+
+			return b, nil
+		}
+		return nil, fmt.Errorf("pseudo-type BINARY object %v field \"data\" is not valid", obj)
+	}
+	return nil, fmt.Errorf("pseudo-type BINARY object %v does not have the expected field \"data\"", obj)
+}
+
+func reqlGeometryToNativeGeometry(obj map[string]interface{}) (interface{}, error) {
+	if typ, ok := obj["type"]; !ok {
+		return nil,
fmt.Errorf("pseudo-type GEOMETRY object %v does not have the expected field \"type\"", obj) + } else if typ, ok := typ.(string); !ok { + return nil, fmt.Errorf("pseudo-type GEOMETRY object %v field \"type\" is not valid", obj) + } else if coords, ok := obj["coordinates"]; !ok { + return nil, fmt.Errorf("pseudo-type GEOMETRY object %v does not have the expected field \"coordinates\"", obj) + } else if typ == "Point" { + point, err := types.UnmarshalPoint(coords) + if err != nil { + return nil, err + } + + return types.Geometry{ + Type: "Point", + Point: point, + }, nil + } else if typ == "LineString" { + line, err := types.UnmarshalLineString(coords) + if err != nil { + return nil, err + } + return types.Geometry{ + Type: "LineString", + Line: line, + }, nil + } else if typ == "Polygon" { + lines, err := types.UnmarshalPolygon(coords) + if err != nil { + return nil, err + } + return types.Geometry{ + Type: "Polygon", + Lines: lines, + }, nil + } else { + return nil, fmt.Errorf("pseudo-type GEOMETRY object %v field has unknown type %s", obj, typ) + } +} diff --git a/vendor/github.com/GoRethink/gorethink/query.go b/vendor/github.com/GoRethink/gorethink/query.go new file mode 100644 index 0000000..4d95281 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query.go @@ -0,0 +1,455 @@ +package gorethink + +import ( + "fmt" + "reflect" + "strconv" + "strings" + + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// A Query represents a query ready to be sent to the database, A Query differs +// from a Term as it contains both a query type and token. These values are used +// by the database to determine if the query is continuing a previous request +// and also allows the driver to identify the response as they can come out of +// order. 
+type Query struct { + Type p.Query_QueryType + Token int64 + Term *Term + Opts map[string]interface{} + builtTerm interface{} +} + +func (q *Query) Build() []interface{} { + res := []interface{}{int(q.Type)} + if q.Term != nil { + res = append(res, q.builtTerm) + } + + if len(q.Opts) > 0 { + // Clone opts and remove custom gorethink options + opts := map[string]interface{}{} + for k, v := range q.Opts { + switch k { + case "geometry_format": + default: + opts[k] = v + } + } + + res = append(res, opts) + } + + return res +} + +type termsList []Term +type termsObj map[string]Term + +// A Term represents a query that is being built. Terms consist of a an array of +// "sub-terms" and a term type. When a Term is a sub-term the first element of +// the terms data is its parent Term. +// +// When built the term becomes a JSON array, for more information on the format +// see http://rethinkdb.com/docs/writing-drivers/. +type Term struct { + name string + rawQuery bool + rootTerm bool + termType p.Term_TermType + data interface{} + args []Term + optArgs map[string]Term + lastErr error + isMockAnything bool +} + +func (t Term) compare(t2 Term, varMap map[int64]int64) bool { + if t.isMockAnything || t2.isMockAnything { + return true + } + + if t.name != t2.name || + t.rawQuery != t2.rawQuery || + t.rootTerm != t2.rootTerm || + t.termType != t2.termType || + !reflect.DeepEqual(t.data, t2.data) || + len(t.args) != len(t2.args) || + len(t.optArgs) != len(t2.optArgs) { + return false + } + + for i, v := range t.args { + if t.termType == p.Term_FUNC && t2.termType == p.Term_FUNC && i == 0 { + // Functions need to be compared differently as each variable + // will have a different var ID so first try to create a mapping + // between the two sets of IDs + argsArr := t.args[0].args + argsArr2 := t2.args[0].args + + if len(argsArr) != len(argsArr2) { + return false + } + + for j := 0; j < len(argsArr); j++ { + varMap[argsArr[j].data.(int64)] = argsArr2[j].data.(int64) + } + } else if 
t.termType == p.Term_VAR && t2.termType == p.Term_VAR && i == 0 { + // When comparing vars use our var map + v1 := t.args[i].data.(int64) + v2 := t2.args[i].data.(int64) + + if varMap[v1] != v2 { + return false + } + } else if !v.compare(t2.args[i], varMap) { + return false + } + } + + for k, v := range t.optArgs { + if _, ok := t2.optArgs[k]; !ok { + return false + } + + if !v.compare(t2.optArgs[k], varMap) { + return false + } + } + + return true +} + +// build takes the query tree and prepares it to be sent as a JSON +// expression +func (t Term) Build() (interface{}, error) { + var err error + + if t.lastErr != nil { + return nil, t.lastErr + } + + if t.rawQuery { + return t.data, nil + } + + switch t.termType { + case p.Term_DATUM: + return t.data, nil + case p.Term_MAKE_OBJ: + res := map[string]interface{}{} + for k, v := range t.optArgs { + res[k], err = v.Build() + if err != nil { + return nil, err + } + } + return res, nil + case p.Term_BINARY: + if len(t.args) == 0 { + return map[string]interface{}{ + "$reql_type$": "BINARY", + "data": t.data, + }, nil + } + } + + args := make([]interface{}, len(t.args)) + optArgs := make(map[string]interface{}, len(t.optArgs)) + + for i, v := range t.args { + arg, err := v.Build() + if err != nil { + return nil, err + } + args[i] = arg + } + + for k, v := range t.optArgs { + optArgs[k], err = v.Build() + if err != nil { + return nil, err + } + } + + ret := []interface{}{int(t.termType)} + + if len(args) > 0 { + ret = append(ret, args) + } + if len(optArgs) > 0 { + ret = append(ret, optArgs) + } + + return ret, nil +} + +// String returns a string representation of the query tree +func (t Term) String() string { + if t.isMockAnything { + return "r.MockAnything()" + } + + switch t.termType { + case p.Term_MAKE_ARRAY: + return fmt.Sprintf("[%s]", strings.Join(argsToStringSlice(t.args), ", ")) + case p.Term_MAKE_OBJ: + return fmt.Sprintf("{%s}", strings.Join(optArgsToStringSlice(t.optArgs), ", ")) + case p.Term_FUNC: + // 
Get string representation of each argument + args := []string{} + for _, v := range t.args[0].args { + args = append(args, fmt.Sprintf("var_%d", v.data)) + } + + return fmt.Sprintf("func(%s r.Term) r.Term { return %s }", + strings.Join(args, ", "), + t.args[1].String(), + ) + case p.Term_VAR: + return fmt.Sprintf("var_%s", t.args[0]) + case p.Term_IMPLICIT_VAR: + return "r.Row" + case p.Term_DATUM: + switch v := t.data.(type) { + case string: + return strconv.Quote(v) + default: + return fmt.Sprintf("%v", v) + } + case p.Term_BINARY: + if len(t.args) == 0 { + return fmt.Sprintf("r.binary()") + } + } + + if t.rootTerm { + return fmt.Sprintf("r.%s(%s)", t.name, strings.Join(allArgsToStringSlice(t.args, t.optArgs), ", ")) + } + + if t.args == nil { + return "r" + } + + return fmt.Sprintf("%s.%s(%s)", t.args[0].String(), t.name, strings.Join(allArgsToStringSlice(t.args[1:], t.optArgs), ", ")) +} + +// OptArgs is an interface used to represent a terms optional arguments. All +// optional argument types have a toMap function, the returned map can be encoded +// and sent as part of the query. +type OptArgs interface { + toMap() map[string]interface{} +} + +func (t Term) OptArgs(args interface{}) Term { + switch args := args.(type) { + case OptArgs: + t.optArgs = convertTermObj(args.toMap()) + case map[string]interface{}: + t.optArgs = convertTermObj(args) + } + + return t +} + +type QueryExecutor interface { + IsConnected() bool + Query(Query) (*Cursor, error) + Exec(Query) error + + newQuery(t Term, opts map[string]interface{}) (Query, error) +} + +// WriteResponse is a helper type used when dealing with the response of a +// write query. It is also returned by the RunWrite function. 
+type WriteResponse struct { + Errors int `gorethink:"errors"` + Inserted int `gorethink:"inserted"` + Updated int `gorethink:"updated"` + Unchanged int `gorethink:"unchanged"` + Replaced int `gorethink:"replaced"` + Renamed int `gorethink:"renamed"` + Skipped int `gorethink:"skipped"` + Deleted int `gorethink:"deleted"` + Created int `gorethink:"created"` + DBsCreated int `gorethink:"dbs_created"` + TablesCreated int `gorethink:"tables_created"` + Dropped int `gorethink:"dropped"` + DBsDropped int `gorethink:"dbs_dropped"` + TablesDropped int `gorethink:"tables_dropped"` + GeneratedKeys []string `gorethink:"generated_keys"` + FirstError string `gorethink:"first_error"` // populated if Errors > 0 + ConfigChanges []ChangeResponse `gorethink:"config_changes"` + Changes []ChangeResponse +} + +// ChangeResponse is a helper type used when dealing with changefeeds. The type +// contains both the value before the query and the new value. +type ChangeResponse struct { + NewValue interface{} `gorethink:"new_val,omitempty"` + OldValue interface{} `gorethink:"old_val,omitempty"` + State string `gorethink:"state,omitempty"` + Error string `gorethink:"error,omitempty"` +} + +// RunOpts contains the optional arguments for the Run function. 
+type RunOpts struct { + DB interface{} `gorethink:"db,omitempty"` + Db interface{} `gorethink:"db,omitempty"` // Deprecated + Profile interface{} `gorethink:"profile,omitempty"` + Durability interface{} `gorethink:"durability,omitempty"` + UseOutdated interface{} `gorethink:"use_outdated,omitempty"` // Deprecated + ArrayLimit interface{} `gorethink:"array_limit,omitempty"` + TimeFormat interface{} `gorethink:"time_format,omitempty"` + GroupFormat interface{} `gorethink:"group_format,omitempty"` + BinaryFormat interface{} `gorethink:"binary_format,omitempty"` + GeometryFormat interface{} `gorethink:"geometry_format,omitempty"` + ReadMode interface{} `gorethink:"read_mode,omitempty"` + + MinBatchRows interface{} `gorethink:"min_batch_rows,omitempty"` + MaxBatchRows interface{} `gorethink:"max_batch_rows,omitempty"` + MaxBatchBytes interface{} `gorethink:"max_batch_bytes,omitempty"` + MaxBatchSeconds interface{} `gorethink:"max_batch_seconds,omitempty"` + FirstBatchScaledownFactor interface{} `gorethink:"first_batch_scaledown_factor,omitempty"` +} + +func (o RunOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Run runs a query using the given connection. +// +// rows, err := query.Run(sess) +// if err != nil { +// // error +// } +// +// var doc MyDocumentType +// for rows.Next(&doc) { +// // Do something with document +// } +func (t Term) Run(s QueryExecutor, optArgs ...RunOpts) (*Cursor, error) { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + + if s == nil || !s.IsConnected() { + return nil, ErrConnectionClosed + } + + q, err := s.newQuery(t, opts) + if err != nil { + return nil, err + } + + return s.Query(q) +} + +// RunWrite runs a query using the given connection but unlike Run automatically +// scans the result into a variable of type WriteResponse. This function should be used +// if you are running a write query (such as Insert, Update, TableCreate, etc...). 
+// +// If an error occurs when running the write query the first error is returned. +// +// res, err := r.DB("database").Table("table").Insert(doc).RunWrite(sess) +func (t Term) RunWrite(s QueryExecutor, optArgs ...RunOpts) (WriteResponse, error) { + var response WriteResponse + + res, err := t.Run(s, optArgs...) + if err != nil { + return response, err + } + defer res.Close() + + if err = res.One(&response); err != nil { + return response, err + } + + if response.Errors > 0 { + return response, fmt.Errorf("%s", response.FirstError) + } + + return response, nil +} + +// ReadOne is a shortcut method that runs the query on the given connection +// and reads one response from the cursor before closing it. +// +// It returns any errors encountered from running the query or reading the response +func (t Term) ReadOne(dest interface{}, s QueryExecutor, optArgs ...RunOpts) error { + res, err := t.Run(s, optArgs...) + if err != nil { + return err + } + return res.One(dest) +} + +// ReadAll is a shortcut method that runs the query on the given connection +// and reads all of the responses from the cursor before closing it. +// +// It returns any errors encountered from running the query or reading the responses +func (t Term) ReadAll(dest interface{}, s QueryExecutor, optArgs ...RunOpts) error { + res, err := t.Run(s, optArgs...) + if err != nil { + return err + } + return res.All(dest) +} + +// ExecOpts contains the optional arguments for the Exec function and inherits +// its options from RunOpts, the only difference is the addition of the NoReply +// field. +// +// When NoReply is true it causes the driver not to wait to receive the result +// and return immediately. 
+type ExecOpts struct { + DB interface{} `gorethink:"db,omitempty"` + Db interface{} `gorethink:"db,omitempty"` // Deprecated + Profile interface{} `gorethink:"profile,omitempty"` + Durability interface{} `gorethink:"durability,omitempty"` + UseOutdated interface{} `gorethink:"use_outdated,omitempty"` // Deprecated + ArrayLimit interface{} `gorethink:"array_limit,omitempty"` + TimeFormat interface{} `gorethink:"time_format,omitempty"` + GroupFormat interface{} `gorethink:"group_format,omitempty"` + BinaryFormat interface{} `gorethink:"binary_format,omitempty"` + GeometryFormat interface{} `gorethink:"geometry_format,omitempty"` + + MinBatchRows interface{} `gorethink:"min_batch_rows,omitempty"` + MaxBatchRows interface{} `gorethink:"max_batch_rows,omitempty"` + MaxBatchBytes interface{} `gorethink:"max_batch_bytes,omitempty"` + MaxBatchSeconds interface{} `gorethink:"max_batch_seconds,omitempty"` + FirstBatchScaledownFactor interface{} `gorethink:"first_batch_scaledown_factor,omitempty"` + + NoReply interface{} `gorethink:"noreply,omitempty"` +} + +func (o ExecOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Exec runs the query but does not return the result. Exec will still wait for +// the response to be received unless the NoReply field is true. 
+// +// err := r.DB("database").Table("table").Insert(doc).Exec(sess, r.ExecOpts{ +// NoReply: true, +// }) +func (t Term) Exec(s QueryExecutor, optArgs ...ExecOpts) error { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + + if s == nil || !s.IsConnected() { + return ErrConnectionClosed + } + + q, err := s.newQuery(t, opts) + if err != nil { + return err + } + + return s.Exec(q) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_admin.go b/vendor/github.com/GoRethink/gorethink/query_admin.go new file mode 100644 index 0000000..90ada69 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_admin.go @@ -0,0 +1,85 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// Config can be used to read and/or update the configurations for individual +// tables or databases. +func (t Term) Config() Term { + return constructMethodTerm(t, "Config", p.Term_CONFIG, []interface{}{}, map[string]interface{}{}) +} + +// Rebalance rebalances the shards of a table. When called on a database, all +// the tables in that database will be rebalanced. +func (t Term) Rebalance() Term { + return constructMethodTerm(t, "Rebalance", p.Term_REBALANCE, []interface{}{}, map[string]interface{}{}) +} + +// ReconfigureOpts contains the optional arguments for the Reconfigure term. +type ReconfigureOpts struct { + Shards interface{} `gorethink:"shards,omitempty"` + Replicas interface{} `gorethink:"replicas,omitempty"` + DryRun interface{} `gorethink:"dry_run,omitempty"` + EmergencyRepair interface{} `gorethink:"emergency_repair,omitempty"` + NonVotingReplicaTags interface{} `gorethink:"nonvoting_replica_tags,omitempty"` + PrimaryReplicaTag interface{} `gorethink:"primary_replica_tag,omitempty"` +} + +func (o ReconfigureOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Reconfigure a table's sharding and replication. 
+func (t Term) Reconfigure(optArgs ...ReconfigureOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Reconfigure", p.Term_RECONFIGURE, []interface{}{}, opts) +} + +// Status return the status of a table +func (t Term) Status() Term { + return constructMethodTerm(t, "Status", p.Term_STATUS, []interface{}{}, map[string]interface{}{}) +} + +// WaitOpts contains the optional arguments for the Wait term. +type WaitOpts struct { + WaitFor interface{} `gorethink:"wait_for,omitempty"` + Timeout interface{} `gorethink:"timeout,omitempty"` +} + +func (o WaitOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Wait for a table or all the tables in a database to be ready. A table may be +// temporarily unavailable after creation, rebalancing or reconfiguring. The +// wait command blocks until the given table (or database) is fully up to date. +// +// Deprecated: This function is not supported by RethinkDB 2.3 and above. +func Wait(optArgs ...WaitOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructRootTerm("Wait", p.Term_WAIT, []interface{}{}, opts) +} + +// Wait for a table or all the tables in a database to be ready. A table may be +// temporarily unavailable after creation, rebalancing or reconfiguring. The +// wait command blocks until the given table (or database) is fully up to date. +func (t Term) Wait(optArgs ...WaitOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Wait", p.Term_WAIT, []interface{}{}, opts) +} + +// Grant modifies access permissions for a user account, globally or on a +// per-database or per-table basis. 
+func (t Term) Grant(args ...interface{}) Term { + return constructMethodTerm(t, "Grant", p.Term_GRANT, args, map[string]interface{}{}) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_aggregation.go b/vendor/github.com/GoRethink/gorethink/query_aggregation.go new file mode 100644 index 0000000..e5cc787 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_aggregation.go @@ -0,0 +1,362 @@ +package gorethink + +import p "gopkg.in/gorethink/gorethink.v2/ql2" + +// Aggregation +// These commands are used to compute smaller values from large sequences. + +// Reduce produces a single value from a sequence through repeated application +// of a reduction function +// +// It takes one argument of type `func (r.Term, r.Term) interface{}`, for +// example this query sums all elements in an array: +// +// r.Expr([]int{1,3,6}).Reduce(func (left, right r.Term) interface{} { +// return left.Add(right) +// }) +func (t Term) Reduce(args ...interface{}) Term { + return constructMethodTerm(t, "Reduce", p.Term_REDUCE, funcWrapArgs(args), map[string]interface{}{}) +} + +// DistinctOpts contains the optional arguments for the Distinct term +type DistinctOpts struct { + Index interface{} `gorethink:"index,omitempty"` +} + +func (o DistinctOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Distinct removes duplicate elements from the sequence. +func Distinct(arg interface{}, optArgs ...DistinctOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructRootTerm("Distinct", p.Term_DISTINCT, []interface{}{arg}, opts) +} + +// Distinct removes duplicate elements from the sequence. 
+func (t Term) Distinct(optArgs ...DistinctOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Distinct", p.Term_DISTINCT, []interface{}{}, opts) +} + +// GroupOpts contains the optional arguments for the Group term +type GroupOpts struct { + Index interface{} `gorethink:"index,omitempty"` + Multi interface{} `gorethink:"multi,omitempty"` +} + +func (o GroupOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Group takes a stream and partitions it into multiple groups based on the +// fields or functions provided. Commands chained after group will be +// called on each of these grouped sub-streams, producing grouped data. +func Group(fieldOrFunctions ...interface{}) Term { + return constructRootTerm("Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{}) +} + +// MultiGroup takes a stream and partitions it into multiple groups based on the +// fields or functions provided. Commands chained after group will be +// called on each of these grouped sub-streams, producing grouped data. +// +// Unlike Group single documents can be assigned to multiple groups, similar +// to the behavior of multi-indexes. When the grouping value is an array, documents +// will be placed in each group that corresponds to the elements of the array. If +// the array is empty the row will be ignored. +func MultiGroup(fieldOrFunctions ...interface{}) Term { + return constructRootTerm("Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{ + "multi": true, + }) +} + +// GroupByIndex takes a stream and partitions it into multiple groups based on the +// fields or functions provided. Commands chained after group will be +// called on each of these grouped sub-streams, producing grouped data. 
+func GroupByIndex(index interface{}, fieldOrFunctions ...interface{}) Term {
+	return constructRootTerm("Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
+		"index": index,
+	})
+}
+
+// MultiGroupByIndex takes a stream and partitions it into multiple groups based on the
+// fields or functions provided. Commands chained after group will be
+// called on each of these grouped sub-streams, producing grouped data.
+//
+// Unlike Group single documents can be assigned to multiple groups, similar
+// to the behavior of multi-indexes. When the grouping value is an array, documents
+// will be placed in each group that corresponds to the elements of the array. If
+// the array is empty the row will be ignored.
+func MultiGroupByIndex(index interface{}, fieldOrFunctions ...interface{}) Term {
+	return constructRootTerm("Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
+		"index": index,
+		"multi": true,
+	})
+}
+
+// Group takes a stream and partitions it into multiple groups based on the
+// fields or functions provided. Commands chained after group will be
+// called on each of these grouped sub-streams, producing grouped data.
+func (t Term) Group(fieldOrFunctions ...interface{}) Term {
+	return constructMethodTerm(t, "Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{})
+}
+
+// MultiGroup takes a stream and partitions it into multiple groups based on the
+// fields or functions provided. Commands chained after group will be
+// called on each of these grouped sub-streams, producing grouped data.
+//
+// Unlike Group single documents can be assigned to multiple groups, similar
+// to the behavior of multi-indexes. When the grouping value is an array, documents
+// will be placed in each group that corresponds to the elements of the array. If
+// the array is empty the row will be ignored.
+func (t Term) MultiGroup(fieldOrFunctions ...interface{}) Term {
+	return constructMethodTerm(t, "Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
+		"multi": true,
+	})
+}
+
+// GroupByIndex takes a stream and partitions it into multiple groups based on the
+// fields or functions provided. Commands chained after group will be
+// called on each of these grouped sub-streams, producing grouped data.
+func (t Term) GroupByIndex(index interface{}, fieldOrFunctions ...interface{}) Term {
+	return constructMethodTerm(t, "Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
+		"index": index,
+	})
+}
+
+// MultiGroupByIndex takes a stream and partitions it into multiple groups based on the
+// fields or functions provided. Commands chained after group will be
+// called on each of these grouped sub-streams, producing grouped data.
+//
+// Unlike Group single documents can be assigned to multiple groups, similar
+// to the behavior of multi-indexes. When the grouping value is an array, documents
+// will be placed in each group that corresponds to the elements of the array. If
+// the array is empty the row will be ignored.
+func (t Term) MultiGroupByIndex(index interface{}, fieldOrFunctions ...interface{}) Term {
+	return constructMethodTerm(t, "Group", p.Term_GROUP, funcWrapArgs(fieldOrFunctions), map[string]interface{}{
+		"index": index,
+		"multi": true,
+	})
+}
+
+// Ungroup takes a grouped stream or grouped data and turns it into an array of
+// objects representing the groups. Any commands chained after Ungroup will
+// operate on this array, rather than operating on each group individually.
+// This is useful if you want to e.g. order the groups by the value of their
+// reduction.
+func (t Term) Ungroup(args ...interface{}) Term { + return constructMethodTerm(t, "Ungroup", p.Term_UNGROUP, args, map[string]interface{}{}) +} + +// Contains returns whether or not a sequence contains all the specified values, +// or if functions are provided instead, returns whether or not a sequence +// contains values matching all the specified functions. +func Contains(args ...interface{}) Term { + return constructRootTerm("Contains", p.Term_CONTAINS, funcWrapArgs(args), map[string]interface{}{}) +} + +// Contains returns whether or not a sequence contains all the specified values, +// or if functions are provided instead, returns whether or not a sequence +// contains values matching all the specified functions. +func (t Term) Contains(args ...interface{}) Term { + return constructMethodTerm(t, "Contains", p.Term_CONTAINS, funcWrapArgs(args), map[string]interface{}{}) +} + +// Aggregators +// These standard aggregator objects are to be used in conjunction with Group. + +// Count the number of elements in the sequence. With a single argument, +// count the number of elements equal to it. If the argument is a function, +// it is equivalent to calling filter before count. +func Count(args ...interface{}) Term { + return constructRootTerm("Count", p.Term_COUNT, funcWrapArgs(args), map[string]interface{}{}) +} + +// Count the number of elements in the sequence. With a single argument, +// count the number of elements equal to it. If the argument is a function, +// it is equivalent to calling filter before count. +func (t Term) Count(args ...interface{}) Term { + return constructMethodTerm(t, "Count", p.Term_COUNT, funcWrapArgs(args), map[string]interface{}{}) +} + +// Sum returns the sum of all the elements of a sequence. If called with a field +// name, sums all the values of that field in the sequence, skipping elements of +// the sequence that lack that field. 
If called with a function, calls that +// function on every element of the sequence and sums the results, skipping +// elements of the sequence where that function returns null or a non-existence +// error. +func Sum(args ...interface{}) Term { + return constructRootTerm("Sum", p.Term_SUM, funcWrapArgs(args), map[string]interface{}{}) +} + +// Sum returns the sum of all the elements of a sequence. If called with a field +// name, sums all the values of that field in the sequence, skipping elements of +// the sequence that lack that field. If called with a function, calls that +// function on every element of the sequence and sums the results, skipping +// elements of the sequence where that function returns null or a non-existence +// error. +func (t Term) Sum(args ...interface{}) Term { + return constructMethodTerm(t, "Sum", p.Term_SUM, funcWrapArgs(args), map[string]interface{}{}) +} + +// Avg returns the average of all the elements of a sequence. If called with a field +// name, averages all the values of that field in the sequence, skipping elements of +// the sequence that lack that field. If called with a function, calls that function +// on every element of the sequence and averages the results, skipping elements of the +// sequence where that function returns null or a non-existence error. +func Avg(args ...interface{}) Term { + return constructRootTerm("Avg", p.Term_AVG, funcWrapArgs(args), map[string]interface{}{}) +} + +// Avg returns the average of all the elements of a sequence. If called with a field +// name, averages all the values of that field in the sequence, skipping elements of +// the sequence that lack that field. If called with a function, calls that function +// on every element of the sequence and averages the results, skipping elements of the +// sequence where that function returns null or a non-existence error. 
+func (t Term) Avg(args ...interface{}) Term { + return constructMethodTerm(t, "Avg", p.Term_AVG, funcWrapArgs(args), map[string]interface{}{}) +} + +// MinOpts contains the optional arguments for the Min term +type MinOpts struct { + Index interface{} `gorethink:"index,omitempty"` +} + +func (o MinOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Min finds the minimum of a sequence. If called with a field name, finds the element +// of that sequence with the smallest value in that field. If called with a function, +// calls that function on every element of the sequence and returns the element +// which produced the smallest value, ignoring any elements where the function +// returns null or produces a non-existence error. +func Min(args ...interface{}) Term { + return constructRootTerm("Min", p.Term_MIN, funcWrapArgs(args), map[string]interface{}{}) +} + +// Min finds the minimum of a sequence. If called with a field name, finds the element +// of that sequence with the smallest value in that field. If called with a function, +// calls that function on every element of the sequence and returns the element +// which produced the smallest value, ignoring any elements where the function +// returns null or produces a non-existence error. +func (t Term) Min(args ...interface{}) Term { + return constructMethodTerm(t, "Min", p.Term_MIN, funcWrapArgs(args), map[string]interface{}{}) +} + +// MinIndex finds the minimum of a sequence. If called with a field name, finds the element +// of that sequence with the smallest value in that field. If called with a function, +// calls that function on every element of the sequence and returns the element +// which produced the smallest value, ignoring any elements where the function +// returns null or produces a non-existence error. 
+func MinIndex(index interface{}, args ...interface{}) Term { + return constructRootTerm("Min", p.Term_MIN, funcWrapArgs(args), map[string]interface{}{ + "index": index, + }) +} + +// MinIndex finds the minimum of a sequence. If called with a field name, finds the element +// of that sequence with the smallest value in that field. If called with a function, +// calls that function on every element of the sequence and returns the element +// which produced the smallest value, ignoring any elements where the function +// returns null or produces a non-existence error. +func (t Term) MinIndex(index interface{}, args ...interface{}) Term { + return constructMethodTerm(t, "Min", p.Term_MIN, funcWrapArgs(args), map[string]interface{}{ + "index": index, + }) +} + +// MaxOpts contains the optional arguments for the Max term +type MaxOpts struct { + Index interface{} `gorethink:"index,omitempty"` +} + +func (o MaxOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Max finds the maximum of a sequence. If called with a field name, finds the element +// of that sequence with the largest value in that field. If called with a function, +// calls that function on every element of the sequence and returns the element +// which produced the largest value, ignoring any elements where the function +// returns null or produces a non-existence error. +func Max(args ...interface{}) Term { + return constructRootTerm("Max", p.Term_MAX, funcWrapArgs(args), map[string]interface{}{}) +} + +// Max finds the maximum of a sequence. If called with a field name, finds the element +// of that sequence with the largest value in that field. If called with a function, +// calls that function on every element of the sequence and returns the element +// which produced the largest value, ignoring any elements where the function +// returns null or produces a non-existence error. 
+func (t Term) Max(args ...interface{}) Term { + return constructMethodTerm(t, "Max", p.Term_MAX, funcWrapArgs(args), map[string]interface{}{}) +} + +// MaxIndex finds the maximum of a sequence. If called with a field name, finds the element +// of that sequence with the largest value in that field. If called with a function, +// calls that function on every element of the sequence and returns the element +// which produced the largest value, ignoring any elements where the function +// returns null or produces a non-existence error. +func MaxIndex(index interface{}, args ...interface{}) Term { + return constructRootTerm("Max", p.Term_MAX, funcWrapArgs(args), map[string]interface{}{ + "index": index, + }) +} + +// MaxIndex finds the maximum of a sequence. If called with a field name, finds the element +// of that sequence with the largest value in that field. If called with a function, +// calls that function on every element of the sequence and returns the element +// which produced the largest value, ignoring any elements where the function +// returns null or produces a non-existence error. +func (t Term) MaxIndex(index interface{}, args ...interface{}) Term { + return constructMethodTerm(t, "Max", p.Term_MAX, funcWrapArgs(args), map[string]interface{}{ + "index": index, + }) +} + +// FoldOpts contains the optional arguments for the Fold term +type FoldOpts struct { + Emit interface{} `gorethink:"emit,omitempty"` + FinalEmit interface{} `gorethink:"final_emit,omitempty"` +} + +func (o FoldOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Fold applies a function to a sequence in order, maintaining state via an +// accumulator. The Fold command returns either a single value or a new sequence. +// +// In its first form, Fold operates like Reduce, returning a value by applying a +// combining function to each element in a sequence, passing the current element +// and the previous reduction result to the function. 
However, Fold has the +// following differences from Reduce: +// - it is guaranteed to proceed through the sequence from first element to last. +// - it passes an initial base value to the function with the first element in +// place of the previous reduction result. +// +// In its second form, Fold operates like ConcatMap, returning a new sequence +// rather than a single value. When an emit function is provided, Fold will: +// - proceed through the sequence in order and take an initial base value, as above. +// - for each element in the sequence, call both the combining function and a +// separate emitting function with the current element and previous reduction result. +// - optionally pass the result of the combining function to the emitting function. +// +// If provided, the emitting function must return a list. +func (t Term) Fold(base, fn interface{}, optArgs ...FoldOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + + args := []interface{}{base, funcWrap(fn)} + + return constructMethodTerm(t, "Fold", p.Term_FOLD, args, opts) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_control.go b/vendor/github.com/GoRethink/gorethink/query_control.go new file mode 100644 index 0000000..596ff77 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_control.go @@ -0,0 +1,395 @@ +package gorethink + +import ( + "encoding/base64" + "encoding/json" + + "reflect" + + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// Expr converts any value to an expression and is also used by many other terms +// such as Insert and Update. This function can convert the following basic Go +// types (bool, int, uint, string, float) and even pointers, maps and structs. +// +// When evaluating structs they are encoded into a map before being sent to the +// server. Each exported field is added to the map unless +// +// - the field's tag is "-", or +// - the field is empty and its tag specifies the "omitempty" option. 
+// +// Each fields default name in the map is the field name but can be specified +// in the struct field's tag value. The "gorethink" key in the struct field's +// tag value is the key name, followed by an optional comma and options. Examples: +// +// // Field is ignored by this package. +// Field int `gorethink:"-"` +// // Field appears as key "myName". +// Field int `gorethink:"myName"` +// // Field appears as key "myName" and +// // the field is omitted from the object if its value is empty, +// // as defined above. +// Field int `gorethink:"myName,omitempty"` +// // Field appears as key "Field" (the default), but +// // the field is skipped if empty. +// // Note the leading comma. +// Field int `gorethink:",omitempty"` +func Expr(val interface{}) Term { + if val == nil { + return Term{ + termType: p.Term_DATUM, + data: nil, + } + } + + switch val := val.(type) { + case Term: + return val + case []interface{}: + vals := make([]Term, len(val)) + for i, v := range val { + vals[i] = Expr(v) + } + + return makeArray(vals) + case map[string]interface{}: + vals := make(map[string]Term, len(val)) + for k, v := range val { + vals[k] = Expr(v) + } + + return makeObject(vals) + case + bool, + int, + int8, + int16, + int32, + int64, + uint, + uint8, + uint16, + uint32, + uint64, + float32, + float64, + uintptr, + string, + *bool, + *int, + *int8, + *int16, + *int32, + *int64, + *uint, + *uint8, + *uint16, + *uint32, + *uint64, + *float32, + *float64, + *uintptr, + *string: + return Term{ + termType: p.Term_DATUM, + data: val, + } + default: + // Use reflection to check for other types + valType := reflect.TypeOf(val) + valValue := reflect.ValueOf(val) + + switch valType.Kind() { + case reflect.Func: + return makeFunc(val) + case reflect.Struct, reflect.Map, reflect.Ptr: + data, err := encode(val) + + if err != nil || data == nil { + return Term{ + termType: p.Term_DATUM, + data: nil, + lastErr: err, + } + } + + return Expr(data) + + case reflect.Slice, reflect.Array: + 
// Check if slice is a byte slice + if valType.Elem().Kind() == reflect.Uint8 { + data, err := encode(val) + + if err != nil || data == nil { + return Term{ + termType: p.Term_DATUM, + data: nil, + lastErr: err, + } + } + + return Expr(data) + } + + vals := make([]Term, valValue.Len()) + for i := 0; i < valValue.Len(); i++ { + vals[i] = Expr(valValue.Index(i).Interface()) + } + + return makeArray(vals) + default: + data, err := encode(val) + + if err != nil || data == nil { + return Term{ + termType: p.Term_DATUM, + data: nil, + lastErr: err, + } + } + + return Term{ + termType: p.Term_DATUM, + data: data, + } + } + } +} + +// JSOpts contains the optional arguments for the JS term +type JSOpts struct { + Timeout interface{} `gorethink:"timeout,omitempty"` +} + +func (o JSOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// JS creates a JavaScript expression which is evaluated by the database when +// running the query. +func JS(jssrc interface{}, optArgs ...JSOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructRootTerm("Js", p.Term_JAVASCRIPT, []interface{}{jssrc}, opts) +} + +// HTTPOpts contains the optional arguments for the HTTP term +type HTTPOpts struct { + // General Options + Timeout interface{} `gorethink:"timeout,omitempty"` + Reattempts interface{} `gorethink:"reattempts,omitempty"` + Redirects interface{} `gorethink:"redirect,omitempty"` + Verify interface{} `gorethink:"verify,omitempty"` + ResultFormat interface{} `gorethink:"resul_format,omitempty"` + + // Request Options + Method interface{} `gorethink:"method,omitempty"` + Auth interface{} `gorethink:"auth,omitempty"` + Params interface{} `gorethink:"params,omitempty"` + Header interface{} `gorethink:"header,omitempty"` + Data interface{} `gorethink:"data,omitempty"` + + // Pagination + Page interface{} `gorethink:"page,omitempty"` + PageLimit interface{} `gorethink:"page_limit,omitempty"` +} + +func (o 
HTTPOpts) toMap() map[string]interface{} {
+	return optArgsToMap(o)
+}
+
+// HTTP retrieves data from the specified URL over HTTP. The return type depends
+// on the resultFormat option, which checks the Content-Type of the response by
+// default.
+func HTTP(url interface{}, optArgs ...HTTPOpts) Term {
+	opts := map[string]interface{}{}
+	if len(optArgs) >= 1 {
+		opts = optArgs[0].toMap()
+	}
+	return constructRootTerm("Http", p.Term_HTTP, []interface{}{url}, opts)
+}
+
+// JSON parses a JSON string on the server.
+func JSON(args ...interface{}) Term {
+	return constructRootTerm("Json", p.Term_JSON, args, map[string]interface{}{})
+}
+
+// Error throws a runtime error. If called with no arguments inside the second argument
+// to `default`, re-throw the current error.
+func Error(args ...interface{}) Term {
+	return constructRootTerm("Error", p.Term_ERROR, args, map[string]interface{}{})
+}
+
+// Args is a special term used to splice an array of arguments into another term.
+// This is useful when you want to call a variadic term such as GetAll with a set
+// of arguments provided at runtime.
+func Args(args ...interface{}) Term {
+	return constructRootTerm("Args", p.Term_ARGS, args, map[string]interface{}{})
+}
+
+// Binary encapsulates binary data within a query.
+//
+// The type of data binary accepts depends on the client language. In Go, it
+// expects either a byte array/slice or a bytes.Buffer.
+//
+// Only a limited subset of ReQL commands may be chained after binary:
+// - coerceTo can coerce binary objects to string types
+// - count will return the number of bytes in the object
+// - slice will treat bytes like array indexes (i.e., slice(10,20) will return bytes 10–19)
+// - typeOf returns PTYPE
+// - info will return information on a binary object.
+func Binary(data interface{}) Term { + var b []byte + + switch data := data.(type) { + case Term: + return constructRootTerm("Binary", p.Term_BINARY, []interface{}{data}, map[string]interface{}{}) + case []byte: + b = data + default: + typ := reflect.TypeOf(data) + if typ.Kind() == reflect.Slice && typ.Elem().Kind() == reflect.Uint8 { + return Binary(reflect.ValueOf(data).Bytes()) + } else if typ.Kind() == reflect.Array && typ.Elem().Kind() == reflect.Uint8 { + v := reflect.ValueOf(data) + b = make([]byte, v.Len()) + for i := 0; i < v.Len(); i++ { + b[i] = v.Index(i).Interface().(byte) + } + return Binary(b) + } + panic("Unsupported binary type") + } + + return binaryTerm(base64.StdEncoding.EncodeToString(b)) +} + +func binaryTerm(data string) Term { + t := constructRootTerm("Binary", p.Term_BINARY, []interface{}{}, map[string]interface{}{}) + t.data = data + + return t +} + +// Do evaluates the expr in the context of one or more value bindings. The type of +// the result is the type of the value returned from expr. +func (t Term) Do(args ...interface{}) Term { + newArgs := []interface{}{} + newArgs = append(newArgs, funcWrap(args[len(args)-1])) + newArgs = append(newArgs, t) + newArgs = append(newArgs, args[:len(args)-1]...) + + return constructRootTerm("Do", p.Term_FUNCALL, newArgs, map[string]interface{}{}) +} + +// Do evaluates the expr in the context of one or more value bindings. The type of +// the result is the type of the value returned from expr. +func Do(args ...interface{}) Term { + newArgs := []interface{}{} + newArgs = append(newArgs, funcWrap(args[len(args)-1])) + newArgs = append(newArgs, args[:len(args)-1]...) + + return constructRootTerm("Do", p.Term_FUNCALL, newArgs, map[string]interface{}{}) +} + +// Branch evaluates one of two control paths based on the value of an expression. +// branch is effectively an if renamed due to language constraints. +// +// The type of the result is determined by the type of the branch that gets executed. 
+func Branch(args ...interface{}) Term { + return constructRootTerm("Branch", p.Term_BRANCH, args, map[string]interface{}{}) +} + +// Branch evaluates one of two control paths based on the value of an expression. +// branch is effectively an if renamed due to language constraints. +// +// The type of the result is determined by the type of the branch that gets executed. +func (t Term) Branch(args ...interface{}) Term { + return constructMethodTerm(t, "Branch", p.Term_BRANCH, args, map[string]interface{}{}) +} + +// ForEach loops over a sequence, evaluating the given write query for each element. +// +// It takes one argument of type `func (r.Term) interface{}`, for +// example clones a table: +// +// r.Table("table").ForEach(func (row r.Term) interface{} { +// return r.Table("new_table").Insert(row) +// }) +func (t Term) ForEach(args ...interface{}) Term { + return constructMethodTerm(t, "Foreach", p.Term_FOR_EACH, funcWrapArgs(args), map[string]interface{}{}) +} + +// Range generates a stream of sequential integers in a specified range. It +// accepts 0, 1, or 2 arguments, all of which should be numbers. +func Range(args ...interface{}) Term { + return constructRootTerm("Range", p.Term_RANGE, args, map[string]interface{}{}) +} + +// Default handles non-existence errors. Tries to evaluate and return its first argument. +// If an error related to the absence of a value is thrown in the process, or if +// its first argument returns null, returns its second argument. (Alternatively, +// the second argument may be a function which will be called with either the +// text of the non-existence error or null.) +func (t Term) Default(args ...interface{}) Term { + return constructMethodTerm(t, "Default", p.Term_DEFAULT, args, map[string]interface{}{}) +} + +// CoerceTo converts a value of one type into another. +// +// You can convert: a selection, sequence, or object into an ARRAY, an array of +// pairs into an OBJECT, and any DATUM into a STRING. 
+func (t Term) CoerceTo(args ...interface{}) Term {
+	return constructMethodTerm(t, "CoerceTo", p.Term_COERCE_TO, args, map[string]interface{}{})
+}
+
+// TypeOf gets the type of a value.
+func TypeOf(args ...interface{}) Term {
+	return constructRootTerm("TypeOf", p.Term_TYPE_OF, args, map[string]interface{}{})
+}
+
+// TypeOf gets the type of a value.
+func (t Term) TypeOf(args ...interface{}) Term {
+	return constructMethodTerm(t, "TypeOf", p.Term_TYPE_OF, args, map[string]interface{}{})
+}
+
+// ToJSON converts a ReQL value or object to a JSON string.
+func (t Term) ToJSON() Term {
+	return constructMethodTerm(t, "ToJSON", p.Term_TO_JSON_STRING, []interface{}{}, map[string]interface{}{})
+}
+
+// Info gets information about a RQL value.
+func (t Term) Info(args ...interface{}) Term {
+	return constructMethodTerm(t, "Info", p.Term_INFO, args, map[string]interface{}{})
+}
+
+// UUID returns a UUID (universally unique identifier), a string that can be used
+// as a unique ID. If a string is passed to uuid as an argument, the UUID will be
+// deterministic, derived from the string’s SHA-1 hash.
+func UUID(args ...interface{}) Term {
+	return constructRootTerm("UUID", p.Term_UUID, args, map[string]interface{}{})
+}
+
+// RawQuery creates a new query from a JSON string, this bypasses any encoding
+// done by GoRethink. The query should not contain the query type or any options
+// as this should be handled using the normal driver API.
+//
+// This query will only work if this is the only term in the query.
+func RawQuery(q []byte) Term { + data := json.RawMessage(q) + return Term{ + name: "RawQuery", + rootTerm: true, + rawQuery: true, + data: &data, + args: []Term{ + Term{ + termType: p.Term_DATUM, + data: string(q), + }, + }, + } +} diff --git a/vendor/github.com/GoRethink/gorethink/query_db.go b/vendor/github.com/GoRethink/gorethink/query_db.go new file mode 100644 index 0000000..66f847c --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_db.go @@ -0,0 +1,25 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// DBCreate creates a database. A RethinkDB database is a collection of tables, +// similar to relational databases. +// +// Note: that you can only use alphanumeric characters and underscores for the +// database name. +func DBCreate(args ...interface{}) Term { + return constructRootTerm("DBCreate", p.Term_DB_CREATE, args, map[string]interface{}{}) +} + +// DBDrop drops a database. The database, all its tables, and corresponding data +// will be deleted. +func DBDrop(args ...interface{}) Term { + return constructRootTerm("DBDrop", p.Term_DB_DROP, args, map[string]interface{}{}) +} + +// DBList lists all database names in the system. +func DBList(args ...interface{}) Term { + return constructRootTerm("DBList", p.Term_DB_LIST, args, map[string]interface{}{}) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_geospatial.go b/vendor/github.com/GoRethink/gorethink/query_geospatial.go new file mode 100644 index 0000000..7945dd7 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_geospatial.go @@ -0,0 +1,170 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// CircleOpts contains the optional arguments for the Circle term. 
+type CircleOpts struct { + NumVertices interface{} `gorethink:"num_vertices,omitempty"` + GeoSystem interface{} `gorethink:"geo_system,omitempty"` + Unit interface{} `gorethink:"unit,omitempty"` + Fill interface{} `gorethink:"fill,omitempty"` +} + +func (o CircleOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Circle constructs a circular line or polygon. A circle in RethinkDB is +// a polygon or line approximating a circle of a given radius around a given +// center, consisting of a specified number of vertices (default 32). +func Circle(point, radius interface{}, optArgs ...CircleOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + + return constructRootTerm("Circle", p.Term_CIRCLE, []interface{}{point, radius}, opts) +} + +// DistanceOpts contains the optional arguments for the Distance term. +type DistanceOpts struct { + GeoSystem interface{} `gorethink:"geo_system,omitempty"` + Unit interface{} `gorethink:"unit,omitempty"` +} + +func (o DistanceOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Distance calculates the Haversine distance between two points. At least one +// of the geometry objects specified must be a point. +func (t Term) Distance(point interface{}, optArgs ...DistanceOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + + return constructMethodTerm(t, "Distance", p.Term_DISTANCE, []interface{}{point}, opts) +} + +// Distance calculates the Haversine distance between two points. At least one +// of the geometry objects specified must be a point. +func Distance(point1, point2 interface{}, optArgs ...DistanceOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + + return constructRootTerm("Distance", p.Term_DISTANCE, []interface{}{point1, point2}, opts) +} + +// Fill converts a Line object into a Polygon object. 
If the last point does not +// specify the same coordinates as the first point, polygon will close the +// polygon by connecting them +func (t Term) Fill() Term { + return constructMethodTerm(t, "Fill", p.Term_FILL, []interface{}{}, map[string]interface{}{}) +} + +// GeoJSON converts a GeoJSON object to a ReQL geometry object. +func GeoJSON(args ...interface{}) Term { + return constructRootTerm("GeoJSON", p.Term_GEOJSON, args, map[string]interface{}{}) +} + +// ToGeoJSON converts a ReQL geometry object to a GeoJSON object. +func (t Term) ToGeoJSON(args ...interface{}) Term { + return constructMethodTerm(t, "ToGeoJSON", p.Term_TO_GEOJSON, args, map[string]interface{}{}) +} + +// GetIntersectingOpts contains the optional arguments for the GetIntersecting term. +type GetIntersectingOpts struct { + Index interface{} `gorethink:"index,omitempty"` +} + +func (o GetIntersectingOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// GetIntersecting gets all documents where the given geometry object intersects +// the geometry object of the requested geospatial index. +func (t Term) GetIntersecting(args interface{}, optArgs ...GetIntersectingOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + + return constructMethodTerm(t, "GetIntersecting", p.Term_GET_INTERSECTING, []interface{}{args}, opts) +} + +// GetNearestOpts contains the optional arguments for the GetNearest term. 
+type GetNearestOpts struct { + Index interface{} `gorethink:"index,omitempty"` + MaxResults interface{} `gorethink:"max_results,omitempty"` + MaxDist interface{} `gorethink:"max_dist,omitempty"` + Unit interface{} `gorethink:"unit,omitempty"` + GeoSystem interface{} `gorethink:"geo_system,omitempty"` +} + +func (o GetNearestOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// GetNearest gets all documents where the specified geospatial index is within a +// certain distance of the specified point (default 100 kilometers). +func (t Term) GetNearest(point interface{}, optArgs ...GetNearestOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + + return constructMethodTerm(t, "GetNearest", p.Term_GET_NEAREST, []interface{}{point}, opts) +} + +// Includes tests whether a geometry object is completely contained within another. +// When applied to a sequence of geometry objects, includes acts as a filter, +// returning a sequence of objects from the sequence that include the argument. +func (t Term) Includes(args ...interface{}) Term { + return constructMethodTerm(t, "Includes", p.Term_INCLUDES, args, map[string]interface{}{}) +} + +// Intersects tests whether two geometry objects intersect with one another. +// When applied to a sequence of geometry objects, intersects acts as a filter, +// returning a sequence of objects from the sequence that intersect with the +// argument. +func (t Term) Intersects(args ...interface{}) Term { + return constructMethodTerm(t, "Intersects", p.Term_INTERSECTS, args, map[string]interface{}{}) +} + +// Line constructs a geometry object of type Line. The line can be specified in +// one of two ways: +// - Two or more two-item arrays, specifying longitude and latitude numbers of +// the line's vertices; +// - Two or more Point objects specifying the line's vertices. 
+func Line(args ...interface{}) Term { + return constructRootTerm("Line", p.Term_LINE, args, map[string]interface{}{}) +} + +// Point constructs a geometry object of type Point. The point is specified by +// two floating point numbers, the longitude (−180 to 180) and latitude +// (−90 to 90) of the point on a perfect sphere. +func Point(lon, lat interface{}) Term { + return constructRootTerm("Point", p.Term_POINT, []interface{}{lon, lat}, map[string]interface{}{}) +} + +// Polygon constructs a geometry object of type Polygon. The Polygon can be +// specified in one of two ways: +// - Three or more two-item arrays, specifying longitude and latitude numbers of the polygon's vertices; +// - Three or more Point objects specifying the polygon's vertices. +func Polygon(args ...interface{}) Term { + return constructRootTerm("Polygon", p.Term_POLYGON, args, map[string]interface{}{}) +} + +// PolygonSub "punches a hole" out of the parent polygon using the polygon passed +// to the function. +// polygon1.PolygonSub(polygon2) -> polygon +// In the example above polygon2 must be completely contained within polygon1 +// and must have no holes itself (it must not be the output of polygon_sub itself). +func (t Term) PolygonSub(args ...interface{}) Term { + return constructMethodTerm(t, "PolygonSub", p.Term_POLYGON_SUB, args, map[string]interface{}{}) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_join.go b/vendor/github.com/GoRethink/gorethink/query_join.go new file mode 100644 index 0000000..e38757a --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_join.go @@ -0,0 +1,47 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// InnerJoin returns the inner product of two sequences (e.g. a table, a filter result) +// filtered by the predicate. The query compares each row of the left sequence +// with each row of the right sequence to find all pairs of rows which satisfy +// the predicate. 
When the predicate is satisfied, each matched pair of rows +// of both sequences are combined into a result row. +func (t Term) InnerJoin(args ...interface{}) Term { + return constructMethodTerm(t, "InnerJoin", p.Term_INNER_JOIN, args, map[string]interface{}{}) +} + +// OuterJoin computes a left outer join by retaining each row in the left table even +// if no match was found in the right table. +func (t Term) OuterJoin(args ...interface{}) Term { + return constructMethodTerm(t, "OuterJoin", p.Term_OUTER_JOIN, args, map[string]interface{}{}) +} + +// EqJoinOpts contains the optional arguments for the EqJoin term. +type EqJoinOpts struct { + Index interface{} `gorethink:"index,omitempty"` + Ordered interface{} `gorethink:"ordered,omitempty"` +} + +func (o EqJoinOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// EqJoin is an efficient join that looks up elements in the right table by primary key. +// +// Optional arguments: "index" (string - name of the index to use in right table instead of the primary key) +func (t Term) EqJoin(left, right interface{}, optArgs ...EqJoinOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "EqJoin", p.Term_EQ_JOIN, []interface{}{funcWrap(left), right}, opts) +} + +// Zip is used to 'zip' up the result of a join by merging the 'right' fields into 'left' +// fields of each member of the sequence. +func (t Term) Zip(args ...interface{}) Term { + return constructMethodTerm(t, "Zip", p.Term_ZIP, args, map[string]interface{}{}) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_manipulation.go b/vendor/github.com/GoRethink/gorethink/query_manipulation.go new file mode 100644 index 0000000..45bac25 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_manipulation.go @@ -0,0 +1,121 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// Row returns the currently visited document. 
Note that Row does not work within +// subqueries to access nested documents; you should use anonymous functions to +// access those documents instead. Also note that unlike in other drivers to +// access a rows fields you should call Field. For example: +// r.row("fieldname") should instead be r.Row.Field("fieldname") +var Row = constructRootTerm("Doc", p.Term_IMPLICIT_VAR, []interface{}{}, map[string]interface{}{}) + +// Literal replaces an object in a field instead of merging it with an existing +// object in a merge or update operation. +func Literal(args ...interface{}) Term { + return constructRootTerm("Literal", p.Term_LITERAL, args, map[string]interface{}{}) +} + +// Field gets a single field from an object. If called on a sequence, gets that field +// from every object in the sequence, skipping objects that lack it. +func (t Term) Field(args ...interface{}) Term { + return constructMethodTerm(t, "Field", p.Term_GET_FIELD, args, map[string]interface{}{}) +} + +// HasFields tests if an object has all of the specified fields. An object has a field if +// it has the specified key and that key maps to a non-null value. For instance, +// the object `{'a':1,'b':2,'c':null}` has the fields `a` and `b`. +func (t Term) HasFields(args ...interface{}) Term { + return constructMethodTerm(t, "HasFields", p.Term_HAS_FIELDS, args, map[string]interface{}{}) +} + +// Pluck plucks out one or more attributes from either an object or a sequence of +// objects (projection). +func (t Term) Pluck(args ...interface{}) Term { + return constructMethodTerm(t, "Pluck", p.Term_PLUCK, args, map[string]interface{}{}) +} + +// Without is the opposite of pluck; takes an object or a sequence of objects, and returns +// them with the specified paths removed. +func (t Term) Without(args ...interface{}) Term { + return constructMethodTerm(t, "Without", p.Term_WITHOUT, args, map[string]interface{}{}) +} + +// Merge merges two objects together to construct a new object with properties from both. 
+// Gives preference to attributes from other when there is a conflict. +func (t Term) Merge(args ...interface{}) Term { + return constructMethodTerm(t, "Merge", p.Term_MERGE, funcWrapArgs(args), map[string]interface{}{}) +} + +// Append appends a value to an array. +func (t Term) Append(args ...interface{}) Term { + return constructMethodTerm(t, "Append", p.Term_APPEND, args, map[string]interface{}{}) +} + +// Prepend prepends a value to an array. +func (t Term) Prepend(args ...interface{}) Term { + return constructMethodTerm(t, "Prepend", p.Term_PREPEND, args, map[string]interface{}{}) +} + +// Difference removes the elements of one array from another array. +func (t Term) Difference(args ...interface{}) Term { + return constructMethodTerm(t, "Difference", p.Term_DIFFERENCE, args, map[string]interface{}{}) +} + +// SetInsert adds a value to an array and return it as a set (an array with distinct values). +func (t Term) SetInsert(args ...interface{}) Term { + return constructMethodTerm(t, "SetInsert", p.Term_SET_INSERT, args, map[string]interface{}{}) +} + +// SetUnion adds several values to an array and return it as a set (an array with +// distinct values). +func (t Term) SetUnion(args ...interface{}) Term { + return constructMethodTerm(t, "SetUnion", p.Term_SET_UNION, args, map[string]interface{}{}) +} + +// SetIntersection calculates the intersection of two arrays returning values that +// occur in both of them as a set (an array with distinct values). +func (t Term) SetIntersection(args ...interface{}) Term { + return constructMethodTerm(t, "SetIntersection", p.Term_SET_INTERSECTION, args, map[string]interface{}{}) +} + +// SetDifference removes the elements of one array from another and return them as a set (an +// array with distinct values). 
+func (t Term) SetDifference(args ...interface{}) Term { + return constructMethodTerm(t, "SetDifference", p.Term_SET_DIFFERENCE, args, map[string]interface{}{}) +} + +// InsertAt inserts a value in to an array at a given index. Returns the modified array. +func (t Term) InsertAt(args ...interface{}) Term { + return constructMethodTerm(t, "InsertAt", p.Term_INSERT_AT, args, map[string]interface{}{}) +} + +// SpliceAt inserts several values in to an array at a given index. Returns the modified array. +func (t Term) SpliceAt(args ...interface{}) Term { + return constructMethodTerm(t, "SpliceAt", p.Term_SPLICE_AT, args, map[string]interface{}{}) +} + +// DeleteAt removes an element from an array at a given index. Returns the modified array. +func (t Term) DeleteAt(args ...interface{}) Term { + return constructMethodTerm(t, "DeleteAt", p.Term_DELETE_AT, args, map[string]interface{}{}) +} + +// ChangeAt changes a value in an array at a given index. Returns the modified array. +func (t Term) ChangeAt(args ...interface{}) Term { + return constructMethodTerm(t, "ChangeAt", p.Term_CHANGE_AT, args, map[string]interface{}{}) +} + +// Keys returns an array containing all of the object's keys. +func (t Term) Keys(args ...interface{}) Term { + return constructMethodTerm(t, "Keys", p.Term_KEYS, args, map[string]interface{}{}) +} + +func (t Term) Values(args ...interface{}) Term { + return constructMethodTerm(t, "Values", p.Term_VALUES, args, map[string]interface{}{}) +} + +// Object creates an object from a list of key-value pairs, where the keys must be strings. 
+func Object(args ...interface{}) Term { + return constructRootTerm("Object", p.Term_OBJECT, args, map[string]interface{}{}) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_math.go b/vendor/github.com/GoRethink/gorethink/query_math.go new file mode 100644 index 0000000..fdf4c09 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_math.go @@ -0,0 +1,229 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +var ( + // MinVal represents the smallest possible value RethinkDB can store + MinVal = constructRootTerm("MinVal", p.Term_MINVAL, []interface{}{}, map[string]interface{}{}) + // MaxVal represents the largest possible value RethinkDB can store + MaxVal = constructRootTerm("MaxVal", p.Term_MAXVAL, []interface{}{}, map[string]interface{}{}) +) + +// Add sums two numbers or concatenates two arrays. +func (t Term) Add(args ...interface{}) Term { + return constructMethodTerm(t, "Add", p.Term_ADD, args, map[string]interface{}{}) +} + +// Add sums two numbers or concatenates two arrays. +func Add(args ...interface{}) Term { + return constructRootTerm("Add", p.Term_ADD, args, map[string]interface{}{}) +} + +// Sub subtracts two numbers. +func (t Term) Sub(args ...interface{}) Term { + return constructMethodTerm(t, "Sub", p.Term_SUB, args, map[string]interface{}{}) +} + +// Sub subtracts two numbers. +func Sub(args ...interface{}) Term { + return constructRootTerm("Sub", p.Term_SUB, args, map[string]interface{}{}) +} + +// Mul multiplies two numbers. +func (t Term) Mul(args ...interface{}) Term { + return constructMethodTerm(t, "Mul", p.Term_MUL, args, map[string]interface{}{}) +} + +// Mul multiplies two numbers. +func Mul(args ...interface{}) Term { + return constructRootTerm("Mul", p.Term_MUL, args, map[string]interface{}{}) +} + +// Div divides two numbers. +func (t Term) Div(args ...interface{}) Term { + return constructMethodTerm(t, "Div", p.Term_DIV, args, map[string]interface{}{}) +} + +// Div divides two numbers. 
+func Div(args ...interface{}) Term { + return constructRootTerm("Div", p.Term_DIV, args, map[string]interface{}{}) +} + +// Mod divides two numbers and returns the remainder. +func (t Term) Mod(args ...interface{}) Term { + return constructMethodTerm(t, "Mod", p.Term_MOD, args, map[string]interface{}{}) +} + +// Mod divides two numbers and returns the remainder. +func Mod(args ...interface{}) Term { + return constructRootTerm("Mod", p.Term_MOD, args, map[string]interface{}{}) +} + +// And performs a logical and on two values. +func (t Term) And(args ...interface{}) Term { + return constructMethodTerm(t, "And", p.Term_AND, args, map[string]interface{}{}) +} + +// And performs a logical and on two values. +func And(args ...interface{}) Term { + return constructRootTerm("And", p.Term_AND, args, map[string]interface{}{}) +} + +// Or performs a logical or on two values. +func (t Term) Or(args ...interface{}) Term { + return constructMethodTerm(t, "Or", p.Term_OR, args, map[string]interface{}{}) +} + +// Or performs a logical or on two values. +func Or(args ...interface{}) Term { + return constructRootTerm("Or", p.Term_OR, args, map[string]interface{}{}) +} + +// Eq returns true if two values are equal. +func (t Term) Eq(args ...interface{}) Term { + return constructMethodTerm(t, "Eq", p.Term_EQ, args, map[string]interface{}{}) +} + +// Eq returns true if two values are equal. +func Eq(args ...interface{}) Term { + return constructRootTerm("Eq", p.Term_EQ, args, map[string]interface{}{}) +} + +// Ne returns true if two values are not equal. +func (t Term) Ne(args ...interface{}) Term { + return constructMethodTerm(t, "Ne", p.Term_NE, args, map[string]interface{}{}) +} + +// Ne returns true if two values are not equal. +func Ne(args ...interface{}) Term { + return constructRootTerm("Ne", p.Term_NE, args, map[string]interface{}{}) +} + +// Gt returns true if the first value is greater than the second. 
+func (t Term) Gt(args ...interface{}) Term { + return constructMethodTerm(t, "Gt", p.Term_GT, args, map[string]interface{}{}) +} + +// Gt returns true if the first value is greater than the second. +func Gt(args ...interface{}) Term { + return constructRootTerm("Gt", p.Term_GT, args, map[string]interface{}{}) +} + +// Ge returns true if the first value is greater than or equal to the second. +func (t Term) Ge(args ...interface{}) Term { + return constructMethodTerm(t, "Ge", p.Term_GE, args, map[string]interface{}{}) +} + +// Ge returns true if the first value is greater than or equal to the second. +func Ge(args ...interface{}) Term { + return constructRootTerm("Ge", p.Term_GE, args, map[string]interface{}{}) +} + +// Lt returns true if the first value is less than the second. +func (t Term) Lt(args ...interface{}) Term { + return constructMethodTerm(t, "Lt", p.Term_LT, args, map[string]interface{}{}) +} + +// Lt returns true if the first value is less than the second. +func Lt(args ...interface{}) Term { + return constructRootTerm("Lt", p.Term_LT, args, map[string]interface{}{}) +} + +// Le returns true if the first value is less than or equal to the second. +func (t Term) Le(args ...interface{}) Term { + return constructMethodTerm(t, "Le", p.Term_LE, args, map[string]interface{}{}) +} + +// Le returns true if the first value is less than or equal to the second. +func Le(args ...interface{}) Term { + return constructRootTerm("Le", p.Term_LE, args, map[string]interface{}{}) +} + +// Not performs a logical not on a value. +func (t Term) Not(args ...interface{}) Term { + return constructMethodTerm(t, "Not", p.Term_NOT, args, map[string]interface{}{}) +} + +// Not performs a logical not on a value. +func Not(args ...interface{}) Term { + return constructRootTerm("Not", p.Term_NOT, args, map[string]interface{}{}) +} + +// RandomOpts contains the optional arguments for the Random term. 
+type RandomOpts struct { + Float interface{} `gorethink:"float,omitempty"` +} + +func (o RandomOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Random generates a random number between given (or implied) bounds. Random +// takes zero, one or two arguments. +// +// With zero arguments, the result will be a floating-point number in the range +// [0,1). +// +// With one argument x, the result will be in the range [0,x), and will be an +// integer unless the Float option is set to true. Specifying a floating point +// number without the Float option will raise an error. +// +// With two arguments x and y, the result will be in the range [x,y), and will +// be an integer unless the Float option is set to true. If x and y are equal an +// error will occur, unless the floating-point option has been specified, in +// which case x will be returned. Specifying a floating point number without the +// float option will raise an error. +// +// Note: Any integer responses can be be coerced to floating-points, when +// unmarshaling to a Go floating-point type. The last argument given will always +// be the ‘open’ side of the range, but when generating a floating-point +// number, the ‘open’ side may be less than the ‘closed’ side. +func Random(args ...interface{}) Term { + var opts = map[string]interface{}{} + + // Look for options map + if len(args) > 0 { + if possibleOpts, ok := args[len(args)-1].(RandomOpts); ok { + opts = possibleOpts.toMap() + args = args[:len(args)-1] + } + } + + return constructRootTerm("Random", p.Term_RANDOM, args, opts) +} + +// Round causes the input number to be rounded the given value to the nearest whole integer. +func (t Term) Round(args ...interface{}) Term { + return constructMethodTerm(t, "Round", p.Term_ROUND, args, map[string]interface{}{}) +} + +// Round causes the input number to be rounded the given value to the nearest whole integer. 
+func Round(args ...interface{}) Term { + return constructRootTerm("Round", p.Term_ROUND, args, map[string]interface{}{}) +} + +// Ceil rounds the given value up, returning the smallest integer value greater +// than or equal to the given value (the value’s ceiling). +func (t Term) Ceil(args ...interface{}) Term { + return constructMethodTerm(t, "Ceil", p.Term_CEIL, args, map[string]interface{}{}) +} + +// Ceil rounds the given value up, returning the smallest integer value greater +// than or equal to the given value (the value’s ceiling). +func Ceil(args ...interface{}) Term { + return constructRootTerm("Ceil", p.Term_CEIL, args, map[string]interface{}{}) +} + +// Floor rounds the given value down, returning the largest integer value less +// than or equal to the given value (the value’s floor). +func (t Term) Floor(args ...interface{}) Term { + return constructMethodTerm(t, "Floor", p.Term_FLOOR, args, map[string]interface{}{}) +} + +// Floor rounds the given value down, returning the largest integer value less +// than or equal to the given value (the value’s floor). +func Floor(args ...interface{}) Term { + return constructRootTerm("Floor", p.Term_FLOOR, args, map[string]interface{}{}) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_select.go b/vendor/github.com/GoRethink/gorethink/query_select.go new file mode 100644 index 0000000..144f5a7 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_select.go @@ -0,0 +1,141 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// DB references a database. 
+func DB(args ...interface{}) Term { + return constructRootTerm("DB", p.Term_DB, args, map[string]interface{}{}) +} + +// TableOpts contains the optional arguments for the Table term +type TableOpts struct { + ReadMode interface{} `gorethink:"read_mode,omitempty"` + UseOutdated interface{} `gorethink:"use_outdated,omitempty"` // Deprecated + IdentifierFormat interface{} `gorethink:"identifier_format,omitempty"` +} + +func (o TableOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Table selects all documents in a table. This command can be chained with +// other commands to do further processing on the data. +// +// There are two optional arguments. +// - useOutdated: if true, this allows potentially out-of-date data to be +// returned, with potentially faster reads. It also allows you to perform reads +// from a secondary replica if a primary has failed. Default false. +// - identifierFormat: possible values are name and uuid, with a default of name. +// If set to uuid, then system tables will refer to servers, databases and tables +// by UUID rather than name. (This only has an effect when used with system tables.) +func Table(name interface{}, optArgs ...TableOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructRootTerm("Table", p.Term_TABLE, []interface{}{name}, opts) +} + +// Table selects all documents in a table. This command can be chained with +// other commands to do further processing on the data. +// +// There are two optional arguments. +// - useOutdated: if true, this allows potentially out-of-date data to be +// returned, with potentially faster reads. It also allows you to perform reads +// from a secondary replica if a primary has failed. Default false. +// - identifierFormat: possible values are name and uuid, with a default of name. +// If set to uuid, then system tables will refer to servers, databases and tables +// by UUID rather than name. 
(This only has an effect when used with system tables.) +func (t Term) Table(name interface{}, optArgs ...TableOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Table", p.Term_TABLE, []interface{}{name}, opts) +} + +// Get gets a document by primary key. If nothing was found, RethinkDB will return a nil value. +func (t Term) Get(args ...interface{}) Term { + return constructMethodTerm(t, "Get", p.Term_GET, args, map[string]interface{}{}) +} + +// GetAllOpts contains the optional arguments for the GetAll term +type GetAllOpts struct { + Index interface{} `gorethink:"index,omitempty"` +} + +func (o GetAllOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// GetAll gets all documents where the given value matches the value of the primary +// index. Multiple values can be passed this function if you want to select multiple +// documents. If the documents you are fetching have composite keys then each +// argument should be a slice. For more information see the examples. +func (t Term) GetAll(keys ...interface{}) Term { + return constructMethodTerm(t, "GetAll", p.Term_GET_ALL, keys, map[string]interface{}{}) +} + +// GetAllByIndex gets all documents where the given value matches the value of +// the requested index. +func (t Term) GetAllByIndex(index interface{}, keys ...interface{}) Term { + return constructMethodTerm(t, "GetAll", p.Term_GET_ALL, keys, map[string]interface{}{"index": index}) +} + +// BetweenOpts contains the optional arguments for the Between term +type BetweenOpts struct { + Index interface{} `gorethink:"index,omitempty"` + LeftBound interface{} `gorethink:"left_bound,omitempty"` + RightBound interface{} `gorethink:"right_bound,omitempty"` +} + +func (o BetweenOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Between gets all documents between two keys. Accepts three optional arguments: +// index, leftBound, and rightBound. 
If index is set to the name of a secondary +// index, between will return all documents where that index’s value is in the +// specified range (it uses the primary key by default). leftBound or rightBound +// may be set to open or closed to indicate whether or not to include that endpoint +// of the range (by default, leftBound is closed and rightBound is open). +// +// You may also use the special constants r.minval and r.maxval for boundaries, +// which represent “less than any index key” and “more than any index key” +// respectively. For instance, if you use r.minval as the lower key, then between +// will return all documents whose primary keys (or indexes) are less than the +// specified upper key. +func (t Term) Between(lowerKey, upperKey interface{}, optArgs ...BetweenOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Between", p.Term_BETWEEN, []interface{}{lowerKey, upperKey}, opts) +} + +// FilterOpts contains the optional arguments for the Filter term +type FilterOpts struct { + Default interface{} `gorethink:"default,omitempty"` +} + +func (o FilterOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Filter gets all the documents for which the given predicate is true. +// +// Filter can be called on a sequence, selection, or a field containing an array +// of elements. The return type is the same as the type on which the function was +// called on. The body of every filter is wrapped in an implicit `.default(false)`, +// and the default value can be changed by passing the optional argument `default`. +// Setting this optional argument to `r.error()` will cause any non-existence +// errors to abort the filter. 
+func (t Term) Filter(f interface{}, optArgs ...FilterOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Filter", p.Term_FILTER, []interface{}{funcWrap(f)}, opts) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_string.go b/vendor/github.com/GoRethink/gorethink/query_string.go new file mode 100644 index 0000000..47feca3 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_string.go @@ -0,0 +1,44 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// Match matches against a regular expression. If no match is found, returns +// null. If there is a match then an object with the following fields is +// returned: +// str: The matched string +// start: The matched string’s start +// end: The matched string’s end +// groups: The capture groups defined with parentheses +// +// Accepts RE2 syntax (https://code.google.com/p/re2/wiki/Syntax). You can +// enable case-insensitive matching by prefixing the regular expression with +// (?i). See the linked RE2 documentation for more flags. +// +// The match command does not support backreferences. +func (t Term) Match(args ...interface{}) Term { + return constructMethodTerm(t, "Match", p.Term_MATCH, args, map[string]interface{}{}) +} + +// Split splits a string into substrings. Splits on whitespace when called with no arguments. +// When called with a separator, splits on that separator. When called with a separator +// and a maximum number of splits, splits on that separator at most max_splits times. +// (Can be called with null as the separator if you want to split on whitespace while still +// specifying max_splits.) +// +// Mimics the behavior of Python's string.split in edge cases, except for splitting on the +// empty string, which instead produces an array of single-character strings. 
+func (t Term) Split(args ...interface{}) Term { + return constructMethodTerm(t, "Split", p.Term_SPLIT, funcWrapArgs(args), map[string]interface{}{}) +} + +// Upcase upper-cases a string. +func (t Term) Upcase(args ...interface{}) Term { + return constructMethodTerm(t, "Upcase", p.Term_UPCASE, args, map[string]interface{}{}) +} + +// Downcase lower-cases a string. +func (t Term) Downcase(args ...interface{}) Term { + return constructMethodTerm(t, "Downcase", p.Term_DOWNCASE, args, map[string]interface{}{}) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_table.go b/vendor/github.com/GoRethink/gorethink/query_table.go new file mode 100644 index 0000000..87058b1 --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_table.go @@ -0,0 +1,173 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// TableCreateOpts contains the optional arguments for the TableCreate term +type TableCreateOpts struct { + PrimaryKey interface{} `gorethink:"primary_key,omitempty"` + Durability interface{} `gorethink:"durability,omitempty"` + Shards interface{} `gorethink:"shards,omitempty"` + Replicas interface{} `gorethink:"replicas,omitempty"` + PrimaryReplicaTag interface{} `gorethink:"primary_replica_tag,omitempty"` + NonVotingReplicaTags interface{} `gorethink:"nonvoting_replica_tags,omitempty"` +} + +func (o TableCreateOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// TableCreate creates a table. A RethinkDB table is a collection of JSON +// documents. +// +// Note: Only alphanumeric characters and underscores are valid for the table name. +func TableCreate(name interface{}, optArgs ...TableCreateOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructRootTerm("TableCreate", p.Term_TABLE_CREATE, []interface{}{name}, opts) +} + +// TableCreate creates a table. A RethinkDB table is a collection of JSON +// documents. 
+// +// Note: Only alphanumeric characters and underscores are valid for the table name. +func (t Term) TableCreate(name interface{}, optArgs ...TableCreateOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "TableCreate", p.Term_TABLE_CREATE, []interface{}{name}, opts) +} + +// TableDrop deletes a table. The table and all its data will be deleted. +func TableDrop(args ...interface{}) Term { + return constructRootTerm("TableDrop", p.Term_TABLE_DROP, args, map[string]interface{}{}) +} + +// TableDrop deletes a table. The table and all its data will be deleted. +func (t Term) TableDrop(args ...interface{}) Term { + return constructMethodTerm(t, "TableDrop", p.Term_TABLE_DROP, args, map[string]interface{}{}) +} + +// TableList lists all table names in a database. +func TableList(args ...interface{}) Term { + return constructRootTerm("TableList", p.Term_TABLE_LIST, args, map[string]interface{}{}) +} + +// TableList lists all table names in a database. +func (t Term) TableList(args ...interface{}) Term { + return constructMethodTerm(t, "TableList", p.Term_TABLE_LIST, args, map[string]interface{}{}) +} + +// IndexCreateOpts contains the optional arguments for the IndexCreate term +type IndexCreateOpts struct { + Multi interface{} `gorethink:"multi,omitempty"` + Geo interface{} `gorethink:"geo,omitempty"` +} + +func (o IndexCreateOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// IndexCreate creates a new secondary index on a table. Secondary indexes +// improve the speed of many read queries at the slight cost of increased +// storage space and decreased write performance. +// +// IndexCreate supports the creation of the following types of indexes, to create +// indexes using arbitrary expressions use IndexCreateFunc. +// - Simple indexes based on the value of a single field. 
+// - Geospatial indexes based on indexes of geometry objects, created when the +// geo optional argument is true. +func (t Term) IndexCreate(name interface{}, optArgs ...IndexCreateOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "IndexCreate", p.Term_INDEX_CREATE, []interface{}{name}, opts) +} + +// IndexCreateFunc creates a new secondary index on a table. Secondary indexes +// improve the speed of many read queries at the slight cost of increased +// storage space and decreased write performance. The function takes a index +// name and RQL term as the index value , the term can be an anonymous function +// or a binary representation obtained from the function field of indexStatus. +// +// It supports the creation of the following types of indexes. +// - Simple indexes based on the value of a single field where the index has a +// different name to the field. +// - Compound indexes based on multiple fields. +// - Multi indexes based on arrays of values, created when the multi optional argument is true. +func (t Term) IndexCreateFunc(name, indexFunction interface{}, optArgs ...IndexCreateOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "IndexCreate", p.Term_INDEX_CREATE, []interface{}{name, funcWrap(indexFunction)}, opts) +} + +// IndexDrop deletes a previously created secondary index of a table. +func (t Term) IndexDrop(args ...interface{}) Term { + return constructMethodTerm(t, "IndexDrop", p.Term_INDEX_DROP, args, map[string]interface{}{}) +} + +// IndexList lists all the secondary indexes of a table. 
+func (t Term) IndexList(args ...interface{}) Term { + return constructMethodTerm(t, "IndexList", p.Term_INDEX_LIST, args, map[string]interface{}{}) +} + +// IndexRenameOpts contains the optional arguments for the IndexRename term +type IndexRenameOpts struct { + Overwrite interface{} `gorethink:"overwrite,omitempty"` +} + +func (o IndexRenameOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// IndexRename renames an existing secondary index on a table. +func (t Term) IndexRename(oldName, newName interface{}, optArgs ...IndexRenameOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "IndexRename", p.Term_INDEX_RENAME, []interface{}{oldName, newName}, opts) +} + +// IndexStatus gets the status of the specified indexes on this table, or the +// status of all indexes on this table if no indexes are specified. +func (t Term) IndexStatus(args ...interface{}) Term { + return constructMethodTerm(t, "IndexStatus", p.Term_INDEX_STATUS, args, map[string]interface{}{}) +} + +// IndexWait waits for the specified indexes on this table to be ready, or for +// all indexes on this table to be ready if no indexes are specified. 
+func (t Term) IndexWait(args ...interface{}) Term { + return constructMethodTerm(t, "IndexWait", p.Term_INDEX_WAIT, args, map[string]interface{}{}) +} + +// ChangesOpts contains the optional arguments for the Changes term +type ChangesOpts struct { + Squash interface{} `gorethink:"squash,omitempty"` + IncludeInitial interface{} `gorethink:"include_initial,omitempty"` + IncludeStates interface{} `gorethink:"include_states,omitempty"` + IncludeOffsets interface{} `gorethink:"include_offsets,omitempty"` + IncludeTypes interface{} `gorethink:"include_types,omitempty"` + ChangefeedQueueSize interface{} `gorethink:"changefeed_queue_size,omitempty"` +} + +// ChangesOpts contains the optional arguments for the Changes term +func (o ChangesOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Changes returns an infinite stream of objects representing changes to a query. +func (t Term) Changes(optArgs ...ChangesOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Changes", p.Term_CHANGES, []interface{}{}, opts) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_time.go b/vendor/github.com/GoRethink/gorethink/query_time.go new file mode 100644 index 0000000..d53ee5c --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_time.go @@ -0,0 +1,187 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// Now returns a time object representing the current time in UTC +func Now(args ...interface{}) Term { + return constructRootTerm("Now", p.Term_NOW, args, map[string]interface{}{}) +} + +// Time creates a time object for a specific time +func Time(args ...interface{}) Term { + return constructRootTerm("Time", p.Term_TIME, args, map[string]interface{}{}) +} + +// EpochTime returns a time object based on seconds since epoch +func EpochTime(args ...interface{}) Term { + return constructRootTerm("EpochTime", p.Term_EPOCH_TIME, args, 
map[string]interface{}{}) +} + +// ISO8601Opts contains the optional arguments for the ISO8601 term +type ISO8601Opts struct { + DefaultTimezone interface{} `gorethink:"default_timezone,omitempty"` +} + +func (o ISO8601Opts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// ISO8601 returns a time object based on an ISO8601 formatted date-time string +func ISO8601(date interface{}, optArgs ...ISO8601Opts) Term { + + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructRootTerm("ISO8601", p.Term_ISO8601, []interface{}{date}, opts) +} + +// InTimezone returns a new time object with a different time zone. While the +// time stays the same, the results returned by methods such as hours() will +// change since they take the timezone into account. The timezone argument +// has to be of the ISO 8601 format. +func (t Term) InTimezone(args ...interface{}) Term { + return constructMethodTerm(t, "InTimezone", p.Term_IN_TIMEZONE, args, map[string]interface{}{}) +} + +// Timezone returns the timezone of the time object +func (t Term) Timezone(args ...interface{}) Term { + return constructMethodTerm(t, "Timezone", p.Term_TIMEZONE, args, map[string]interface{}{}) +} + +// DuringOpts contains the optional arguments for the During term +type DuringOpts struct { + LeftBound interface{} `gorethink:"left_bound,omitempty"` + RightBound interface{} `gorethink:"right_bound,omitempty"` +} + +func (o DuringOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// During returns true if a time is between two other times +// (by default, inclusive for the start, exclusive for the end). 
+func (t Term) During(startTime, endTime interface{}, optArgs ...DuringOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "During", p.Term_DURING, []interface{}{startTime, endTime}, opts) +} + +// Date returns a new time object only based on the day, month and year +// (ie. the same day at 00:00). +func (t Term) Date(args ...interface{}) Term { + return constructMethodTerm(t, "Date", p.Term_DATE, args, map[string]interface{}{}) +} + +// TimeOfDay returns the number of seconds elapsed since the beginning of the +// day stored in the time object. +func (t Term) TimeOfDay(args ...interface{}) Term { + return constructMethodTerm(t, "TimeOfDay", p.Term_TIME_OF_DAY, args, map[string]interface{}{}) +} + +// Year returns the year of a time object. +func (t Term) Year(args ...interface{}) Term { + return constructMethodTerm(t, "Year", p.Term_YEAR, args, map[string]interface{}{}) +} + +// Month returns the month of a time object as a number between 1 and 12. +// For your convenience, the terms r.January(), r.February() etc. are +// defined and map to the appropriate integer. +func (t Term) Month(args ...interface{}) Term { + return constructMethodTerm(t, "Month", p.Term_MONTH, args, map[string]interface{}{}) +} + +// Day return the day of a time object as a number between 1 and 31. +func (t Term) Day(args ...interface{}) Term { + return constructMethodTerm(t, "Day", p.Term_DAY, args, map[string]interface{}{}) +} + +// DayOfWeek returns the day of week of a time object as a number between +// 1 and 7 (following ISO 8601 standard). For your convenience, +// the terms r.Monday(), r.Tuesday() etc. are defined and map to +// the appropriate integer. 
+func (t Term) DayOfWeek(args ...interface{}) Term { + return constructMethodTerm(t, "DayOfWeek", p.Term_DAY_OF_WEEK, args, map[string]interface{}{}) +} + +// DayOfYear returns the day of the year of a time object as a number between +// 1 and 366 (following ISO 8601 standard). +func (t Term) DayOfYear(args ...interface{}) Term { + return constructMethodTerm(t, "DayOfYear", p.Term_DAY_OF_YEAR, args, map[string]interface{}{}) +} + +// Hours returns the hour in a time object as a number between 0 and 23. +func (t Term) Hours(args ...interface{}) Term { + return constructMethodTerm(t, "Hours", p.Term_HOURS, args, map[string]interface{}{}) +} + +// Minutes returns the minute in a time object as a number between 0 and 59. +func (t Term) Minutes(args ...interface{}) Term { + return constructMethodTerm(t, "Minutes", p.Term_MINUTES, args, map[string]interface{}{}) +} + +// Seconds returns the seconds in a time object as a number between 0 and +// 59.999 (double precision). +func (t Term) Seconds(args ...interface{}) Term { + return constructMethodTerm(t, "Seconds", p.Term_SECONDS, args, map[string]interface{}{}) +} + +// ToISO8601 converts a time object to its iso 8601 format. +func (t Term) ToISO8601(args ...interface{}) Term { + return constructMethodTerm(t, "ToISO8601", p.Term_TO_ISO8601, args, map[string]interface{}{}) +} + +// ToEpochTime converts a time object to its epoch time. 
+func (t Term) ToEpochTime(args ...interface{}) Term { + return constructMethodTerm(t, "ToEpochTime", p.Term_TO_EPOCH_TIME, args, map[string]interface{}{}) +} + +var ( + // Days + + // Monday is a constant representing the day of the week Monday + Monday = constructRootTerm("Monday", p.Term_MONDAY, []interface{}{}, map[string]interface{}{}) + // Tuesday is a constant representing the day of the week Tuesday + Tuesday = constructRootTerm("Tuesday", p.Term_TUESDAY, []interface{}{}, map[string]interface{}{}) + // Wednesday is a constant representing the day of the week Wednesday + Wednesday = constructRootTerm("Wednesday", p.Term_WEDNESDAY, []interface{}{}, map[string]interface{}{}) + // Thursday is a constant representing the day of the week Thursday + Thursday = constructRootTerm("Thursday", p.Term_THURSDAY, []interface{}{}, map[string]interface{}{}) + // Friday is a constant representing the day of the week Friday + Friday = constructRootTerm("Friday", p.Term_FRIDAY, []interface{}{}, map[string]interface{}{}) + // Saturday is a constant representing the day of the week Saturday + Saturday = constructRootTerm("Saturday", p.Term_SATURDAY, []interface{}{}, map[string]interface{}{}) + // Sunday is a constant representing the day of the week Sunday + Sunday = constructRootTerm("Sunday", p.Term_SUNDAY, []interface{}{}, map[string]interface{}{}) + + // Months + + // January is a constant representing the month January + January = constructRootTerm("January", p.Term_JANUARY, []interface{}{}, map[string]interface{}{}) + // February is a constant representing the month February + February = constructRootTerm("February", p.Term_FEBRUARY, []interface{}{}, map[string]interface{}{}) + // March is a constant representing the month March + March = constructRootTerm("March", p.Term_MARCH, []interface{}{}, map[string]interface{}{}) + // April is a constant representing the month April + April = constructRootTerm("April", p.Term_APRIL, []interface{}{}, map[string]interface{}{}) + // 
May is a constant representing the month May + May = constructRootTerm("May", p.Term_MAY, []interface{}{}, map[string]interface{}{}) + // June is a constant representing the month June + June = constructRootTerm("June", p.Term_JUNE, []interface{}{}, map[string]interface{}{}) + // July is a constant representing the month July + July = constructRootTerm("July", p.Term_JULY, []interface{}{}, map[string]interface{}{}) + // August is a constant representing the month August + August = constructRootTerm("August", p.Term_AUGUST, []interface{}{}, map[string]interface{}{}) + // September is a constant representing the month September + September = constructRootTerm("September", p.Term_SEPTEMBER, []interface{}{}, map[string]interface{}{}) + // October is a constant representing the month October + October = constructRootTerm("October", p.Term_OCTOBER, []interface{}{}, map[string]interface{}{}) + // November is a constant representing the month November + November = constructRootTerm("November", p.Term_NOVEMBER, []interface{}{}, map[string]interface{}{}) + // December is a constant representing the month December + December = constructRootTerm("December", p.Term_DECEMBER, []interface{}{}, map[string]interface{}{}) +) diff --git a/vendor/github.com/GoRethink/gorethink/query_transformation.go b/vendor/github.com/GoRethink/gorethink/query_transformation.go new file mode 100644 index 0000000..a834dfb --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_transformation.go @@ -0,0 +1,193 @@ +package gorethink + +import p "gopkg.in/gorethink/gorethink.v2/ql2" + +// Map transform each element of the sequence by applying the given mapping +// function. It takes two arguments, a sequence and a function of type +// `func (r.Term) interface{}`. 
+// +// For example this query doubles each element in an array: +// +// r.Map([]int{1,3,6}, func (row r.Term) interface{} { +// return row.Mul(2) +// }) +func Map(args ...interface{}) Term { + if len(args) > 0 { + args = append(args[:len(args)-1], funcWrap(args[len(args)-1])) + } + + return constructRootTerm("Map", p.Term_MAP, args, map[string]interface{}{}) +} + +// Map transforms each element of the sequence by applying the given mapping +// function. It takes one argument of type `func (r.Term) interface{}`. +// +// For example this query doubles each element in an array: +// +// r.Expr([]int{1,3,6}).Map(func (row r.Term) interface{} { +// return row.Mul(2) +// }) +func (t Term) Map(args ...interface{}) Term { + if len(args) > 0 { + args = append(args[:len(args)-1], funcWrap(args[len(args)-1])) + } + + return constructMethodTerm(t, "Map", p.Term_MAP, args, map[string]interface{}{}) +} + +// WithFields takes a sequence of objects and a list of fields. If any objects in the +// sequence don't have all of the specified fields, they're dropped from the +// sequence. The remaining objects have the specified fields plucked out. +// (This is identical to `HasFields` followed by `Pluck` on a sequence.) +func (t Term) WithFields(args ...interface{}) Term { + return constructMethodTerm(t, "WithFields", p.Term_WITH_FIELDS, args, map[string]interface{}{}) +} + +// ConcatMap concatenates one or more elements into a single sequence using a +// mapping function. ConcatMap works in a similar fashion to Map, applying the +// given function to each element in a sequence, but it will always return a +// single sequence. 
+func (t Term) ConcatMap(args ...interface{}) Term { + return constructMethodTerm(t, "ConcatMap", p.Term_CONCAT_MAP, funcWrapArgs(args), map[string]interface{}{}) +} + +// OrderByOpts contains the optional arguments for the OrderBy term +type OrderByOpts struct { + Index interface{} `gorethink:"index,omitempty"` +} + +func (o OrderByOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// OrderBy sorts the sequence by document values of the given key(s). To specify +// the ordering, wrap the attribute with either r.Asc or r.Desc (defaults to +// ascending). +// +// Sorting without an index requires the server to hold the sequence in memory, +// and is limited to 100,000 documents (or the setting of the ArrayLimit option +// for run). Sorting with an index can be done on arbitrarily large tables, or +// after a between command using the same index. +func (t Term) OrderBy(args ...interface{}) Term { + var opts = map[string]interface{}{} + + // Look for options map + if len(args) > 0 { + if possibleOpts, ok := args[len(args)-1].(OrderByOpts); ok { + opts = possibleOpts.toMap() + args = args[:len(args)-1] + } + } + + for k, arg := range args { + if t, ok := arg.(Term); !(ok && (t.termType == p.Term_DESC || t.termType == p.Term_ASC)) { + args[k] = funcWrap(arg) + } + } + + return constructMethodTerm(t, "OrderBy", p.Term_ORDER_BY, args, opts) +} + +// Desc is used by the OrderBy term to specify the ordering to be descending. +func Desc(args ...interface{}) Term { + return constructRootTerm("Desc", p.Term_DESC, funcWrapArgs(args), map[string]interface{}{}) +} + +// Asc is used by the OrderBy term to specify that the ordering be ascending (the +// default). +func Asc(args ...interface{}) Term { + return constructRootTerm("Asc", p.Term_ASC, funcWrapArgs(args), map[string]interface{}{}) +} + +// Skip skips a number of elements from the head of the sequence. 
+func (t Term) Skip(args ...interface{}) Term { + return constructMethodTerm(t, "Skip", p.Term_SKIP, args, map[string]interface{}{}) +} + +// Limit ends the sequence after the given number of elements. +func (t Term) Limit(args ...interface{}) Term { + return constructMethodTerm(t, "Limit", p.Term_LIMIT, args, map[string]interface{}{}) +} + +// SliceOpts contains the optional arguments for the Slice term +type SliceOpts struct { + LeftBound interface{} `gorethink:"left_bound,omitempty"` + RightBound interface{} `gorethink:"right_bound,omitempty"` +} + +func (o SliceOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Slice trims the sequence to within the bounds provided. +func (t Term) Slice(args ...interface{}) Term { + var opts = map[string]interface{}{} + + // Look for options map + if len(args) > 0 { + if possibleOpts, ok := args[len(args)-1].(SliceOpts); ok { + opts = possibleOpts.toMap() + args = args[:len(args)-1] + } + } + + return constructMethodTerm(t, "Slice", p.Term_SLICE, args, opts) +} + +// AtIndex gets a single field from an object or the nth element from a sequence. +func (t Term) AtIndex(args ...interface{}) Term { + return constructMethodTerm(t, "AtIndex", p.Term_BRACKET, args, map[string]interface{}{}) +} + +// Nth gets the nth element from a sequence. +func (t Term) Nth(args ...interface{}) Term { + return constructMethodTerm(t, "Nth", p.Term_NTH, args, map[string]interface{}{}) +} + +// OffsetsOf gets the indexes of an element in a sequence. If the argument is a +// predicate, get the indexes of all elements matching it. +func (t Term) OffsetsOf(args ...interface{}) Term { + return constructMethodTerm(t, "OffsetsOf", p.Term_OFFSETS_OF, funcWrapArgs(args), map[string]interface{}{}) +} + +// IsEmpty tests if a sequence is empty. 
+func (t Term) IsEmpty(args ...interface{}) Term { + return constructMethodTerm(t, "IsEmpty", p.Term_IS_EMPTY, args, map[string]interface{}{}) +} + +// UnionOpts contains the optional arguments for the Union term +type UnionOpts struct { + Interleave interface{} `gorethink:"interleave,omitempty"` +} + +func (o UnionOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Union concatenates two sequences. +func Union(args ...interface{}) Term { + return constructRootTerm("Union", p.Term_UNION, args, map[string]interface{}{}) +} + +// Union concatenates two sequences. +func (t Term) Union(args ...interface{}) Term { + return constructMethodTerm(t, "Union", p.Term_UNION, args, map[string]interface{}{}) +} + +// UnionWithOpts like Union concatenates two sequences however allows for optional +// arguments to be passed. +func UnionWithOpts(optArgs UnionOpts, args ...interface{}) Term { + return constructRootTerm("Union", p.Term_UNION, args, optArgs.toMap()) +} + +// UnionWithOpts like Union concatenates two sequences however allows for optional +// arguments to be passed. +func (t Term) UnionWithOpts(optArgs UnionOpts, args ...interface{}) Term { + return constructMethodTerm(t, "Union", p.Term_UNION, args, optArgs.toMap()) +} + +// Sample selects a given number of elements from a sequence with uniform random +// distribution. Selection is done without replacement. 
+func (t Term) Sample(args ...interface{}) Term { + return constructMethodTerm(t, "Sample", p.Term_SAMPLE, args, map[string]interface{}{}) +} diff --git a/vendor/github.com/GoRethink/gorethink/query_write.go b/vendor/github.com/GoRethink/gorethink/query_write.go new file mode 100644 index 0000000..aa1d2cd --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/query_write.go @@ -0,0 +1,98 @@ +package gorethink + +import ( + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// InsertOpts contains the optional arguments for the Insert term +type InsertOpts struct { + Durability interface{} `gorethink:"durability,omitempty"` + ReturnChanges interface{} `gorethink:"return_changes,omitempty"` + Conflict interface{} `gorethink:"conflict,omitempty"` +} + +func (o InsertOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Insert documents into a table. Accepts a single document or an array +// of documents. +func (t Term) Insert(arg interface{}, optArgs ...InsertOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Insert", p.Term_INSERT, []interface{}{Expr(arg)}, opts) +} + +// UpdateOpts contains the optional arguments for the Update term +type UpdateOpts struct { + Durability interface{} `gorethink:"durability,omitempty"` + ReturnChanges interface{} `gorethink:"return_changes,omitempty"` + NonAtomic interface{} `gorethink:"non_atomic,omitempty"` + Conflict interface{} `gorethink:"conflict,omitempty"` +} + +func (o UpdateOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Update JSON documents in a table. Accepts a JSON document, a ReQL expression, +// or a combination of the two. You can pass options like returnChanges that will +// return the old and new values of the row you have modified. 
+func (t Term) Update(arg interface{}, optArgs ...UpdateOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Update", p.Term_UPDATE, []interface{}{funcWrap(arg)}, opts) +} + +// ReplaceOpts contains the optional arguments for the Replace term +type ReplaceOpts struct { + Durability interface{} `gorethink:"durability,omitempty"` + ReturnChanges interface{} `gorethink:"return_changes,omitempty"` + NonAtomic interface{} `gorethink:"non_atomic,omitempty"` +} + +func (o ReplaceOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Replace documents in a table. Accepts a JSON document or a ReQL expression, +// and replaces the original document with the new one. The new document must +// have the same primary key as the original document. +func (t Term) Replace(arg interface{}, optArgs ...ReplaceOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Replace", p.Term_REPLACE, []interface{}{funcWrap(arg)}, opts) +} + +// DeleteOpts contains the optional arguments for the Delete term +type DeleteOpts struct { + Durability interface{} `gorethink:"durability,omitempty"` + ReturnChanges interface{} `gorethink:"return_changes,omitempty"` +} + +func (o DeleteOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Delete one or more documents from a table. +func (t Term) Delete(optArgs ...DeleteOpts) Term { + opts := map[string]interface{}{} + if len(optArgs) >= 1 { + opts = optArgs[0].toMap() + } + return constructMethodTerm(t, "Delete", p.Term_DELETE, []interface{}{}, opts) +} + +// Sync ensures that writes on a given table are written to permanent storage. +// Queries that specify soft durability do not give such guarantees, so Sync +// can be used to ensure the state of these queries. A call to Sync does not +// return until all previous writes to the table are persisted. 
+func (t Term) Sync(args ...interface{}) Term { + return constructMethodTerm(t, "Sync", p.Term_SYNC, args, map[string]interface{}{}) +} diff --git a/vendor/github.com/GoRethink/gorethink/session.go b/vendor/github.com/GoRethink/gorethink/session.go new file mode 100644 index 0000000..5e2241e --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/session.go @@ -0,0 +1,328 @@ +package gorethink + +import ( + "crypto/tls" + "sync" + "time" + + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// A Session represents a connection to a RethinkDB cluster and should be used +// when executing queries. +type Session struct { + hosts []Host + opts *ConnectOpts + + mu sync.RWMutex + cluster *Cluster + closed bool +} + +// ConnectOpts is used to specify optional arguments when connecting to a cluster. +type ConnectOpts struct { + // Address holds the address of the server initially used when creating the + // session. Only used if Addresses is empty + Address string `gorethink:"address,omitempty"` + // Addresses holds the addresses of the servers initially used when creating + // the session. 
+ Addresses []string `gorethink:"addresses,omitempty"` + // Database is the default database name used when executing queries, this + // value is only used if the query does not contain any DB term + Database string `gorethink:"database,omitempty"` + // Username holds the username used for authentication, if blank (and the v1 + // handshake protocol is being used) then the admin user is used + Username string `gorethink:"username,omitempty"` + // Password holds the password used for authentication (only used when using + // the v1 handshake protocol) + Password string `gorethink:"password,omitempty"` + // AuthKey is used for authentication when using the v0.4 handshake protocol + // This field is now deprecated + AuthKey string `gorethink:"authkey,omitempty"` + // Timeout is the time the driver waits when creating new connections, to + // configure the timeout used when executing queries use WriteTimeout and + // ReadTimeout + Timeout time.Duration `gorethink:"timeout,omitempty"` + // WriteTimeout is the amount of time the driver will wait when sending the + // query to the server + WriteTimeout time.Duration `gorethink:"write_timeout,omitempty"` + // ReadTimeout is the amount of time the driver will wait for a response from + // the server when executing queries. + ReadTimeout time.Duration `gorethink:"read_timeout,omitempty"` + // KeepAlivePeriod is the keep alive period used by the connection, by default + // this is 30s. It is not possible to disable keep alive messages + KeepAlivePeriod time.Duration `gorethink:"keep_alive_timeout,omitempty"` + // TLSConfig holds the TLS configuration and can be used when connecting + // to a RethinkDB server protected by SSL + TLSConfig *tls.Config `gorethink:"tlsconfig,omitempty"` + // HandshakeVersion is used to specify which handshake version should be + // used, this currently defaults to v1 which is used by RethinkDB 2.3 and + // later. 
If you are using an older version then you can set the handshake + // version to 0.4 + HandshakeVersion HandshakeVersion `gorethink:"handshake_version,omitempty"` + // UseJSONNumber indicates whether the cursors running in this session should + // use json.Number instead of float64 while unmarshaling documents with + // interface{}. The default is `false`. + UseJSONNumber bool + // NumRetries is the number of times a query is retried if a connection + // error is detected, queries are not retried if RethinkDB returns a + // runtime error. + NumRetries int + + // InitialCap is used by the internal connection pool and is used to + // configure how many connections are created for each host when the + // session is created. If zero then no connections are created until + // the first query is executed. + InitialCap int `gorethink:"initial_cap,omitempty"` + // MaxOpen is used by the internal connection pool and is used to configure + // the maximum number of connections held in the pool. If all available + // connections are being used then the driver will open new connections as + // needed however they will not be returned to the pool. By default the + // maximum number of connections is 2 + MaxOpen int `gorethink:"max_open,omitempty"` + + // Below options are for cluster discovery, please note there is a high + // probability of these changing as the API is still being worked on. + + // DiscoverHosts is used to enable host discovery, when true the driver + // will attempt to discover any new nodes added to the cluster and then + // start sending queries to these new nodes. + DiscoverHosts bool `gorethink:"discover_hosts,omitempty"` + // HostDecayDuration is used by the go-hostpool package to calculate a weighted + // score when selecting a host. By default a value of 5 minutes is used. + HostDecayDuration time.Duration + + // Deprecated: This function is no longer used due to changes in the + // way hosts are selected. 
+ NodeRefreshInterval time.Duration `gorethink:"node_refresh_interval,omitempty"` + // Deprecated: Use InitialCap instead + MaxIdle int `gorethink:"max_idle,omitempty"` +} + +func (o ConnectOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// Connect creates a new database session. To view the available connection +// options see ConnectOpts. +// +// By default maxIdle and maxOpen are set to 1: passing values greater +// than the default (e.g. MaxIdle: 10, MaxOpen: 20) will provide a +// pool of re-usable connections. +// +// Basic connection example: +// +// session, err := r.Connect(r.ConnectOpts{ +// Address: "localhost:28015", +// Database: "test", +// AuthKey: "14daak1cad13dj", +// }) +// +// Cluster connection example: +// +// session, err := r.Connect(r.ConnectOpts{ +// Addresses: []string{"localhost:28015", "localhost:28016"}, +// Database: "test", +// AuthKey: "14daak1cad13dj", +// }) +func Connect(opts ConnectOpts) (*Session, error) { + var addresses = opts.Addresses + if len(addresses) == 0 { + addresses = []string{opts.Address} + } + + hosts := make([]Host, len(addresses)) + for i, address := range addresses { + hostname, port := splitAddress(address) + hosts[i] = NewHost(hostname, port) + } + if len(hosts) <= 0 { + return nil, ErrNoHosts + } + + // Connect + s := &Session{ + hosts: hosts, + opts: &opts, + } + + err := s.Reconnect() + if err != nil { + // note: s.Reconnect() will initialize cluster information which + // will cause the .IsConnected() method to be caught in a loop + return &Session{ + hosts: hosts, + opts: &opts, + }, err + } + + return s, nil +} + +// CloseOpts allows calls to the Close function to be configured. +type CloseOpts struct { + NoReplyWait bool `gorethink:"noreplyWait,omitempty"` +} + +func (o CloseOpts) toMap() map[string]interface{} { + return optArgsToMap(o) +} + +// IsConnected returns true if session has a valid connection. 
+func (s *Session) IsConnected() bool { + s.mu.Lock() + defer s.mu.Unlock() + + if s.cluster == nil || s.closed { + return false + } + return s.cluster.IsConnected() +} + +// Reconnect closes and re-opens a session. +func (s *Session) Reconnect(optArgs ...CloseOpts) error { + var err error + + if err = s.Close(optArgs...); err != nil { + return err + } + + s.mu.Lock() + s.cluster, err = NewCluster(s.hosts, s.opts) + if err != nil { + s.mu.Unlock() + return err + } + + s.closed = false + s.mu.Unlock() + + return nil +} + +// Close closes the session +func (s *Session) Close(optArgs ...CloseOpts) error { + s.mu.Lock() + defer s.mu.Unlock() + + if s.closed { + return nil + } + + if len(optArgs) >= 1 { + if optArgs[0].NoReplyWait { + s.mu.Unlock() + s.NoReplyWait() + s.mu.Lock() + } + } + + if s.cluster != nil { + s.cluster.Close() + } + s.cluster = nil + s.closed = true + + return nil +} + +// SetInitialPoolCap sets the initial capacity of the connection pool. +func (s *Session) SetInitialPoolCap(n int) { + s.mu.Lock() + defer s.mu.Unlock() + + s.opts.InitialCap = n + s.cluster.SetInitialPoolCap(n) +} + +// SetMaxIdleConns sets the maximum number of connections in the idle +// connection pool. +func (s *Session) SetMaxIdleConns(n int) { + s.mu.Lock() + defer s.mu.Unlock() + + s.opts.MaxIdle = n + s.cluster.SetMaxIdleConns(n) +} + +// SetMaxOpenConns sets the maximum number of open connections to the database. +func (s *Session) SetMaxOpenConns(n int) { + s.mu.Lock() + defer s.mu.Unlock() + + s.opts.MaxOpen = n + s.cluster.SetMaxOpenConns(n) +} + +// NoReplyWait ensures that previous queries with the noreply flag have been +// processed by the server. 
Note that this guarantee only applies to queries +// run on the given connection +func (s *Session) NoReplyWait() error { + s.mu.RLock() + defer s.mu.RUnlock() + + if s.closed { + return ErrConnectionClosed + } + + return s.cluster.Exec(Query{ + Type: p.Query_NOREPLY_WAIT, + }) +} + +// Use changes the default database used +func (s *Session) Use(database string) { + s.mu.Lock() + defer s.mu.Unlock() + + s.opts.Database = database +} + +// Database returns the selected database set by Use +func (s *Session) Database() string { + s.mu.RLock() + defer s.mu.RUnlock() + + return s.opts.Database +} + +// Query executes a ReQL query using the session to connect to the database +func (s *Session) Query(q Query) (*Cursor, error) { + s.mu.RLock() + defer s.mu.RUnlock() + + if s.closed { + return nil, ErrConnectionClosed + } + + return s.cluster.Query(q) +} + +// Exec executes a ReQL query using the session to connect to the database +func (s *Session) Exec(q Query) error { + s.mu.RLock() + defer s.mu.RUnlock() + + if s.closed { + return ErrConnectionClosed + } + + return s.cluster.Exec(q) +} + +// Server returns the server name and server UUID being used by a connection. 
+func (s *Session) Server() (ServerResponse, error) { + return s.cluster.Server() +} + +// SetHosts resets the hosts used when connecting to the RethinkDB cluster +func (s *Session) SetHosts(hosts []Host) { + s.mu.Lock() + defer s.mu.Unlock() + + s.hosts = hosts +} + +func (s *Session) newQuery(t Term, opts map[string]interface{}) (Query, error) { + return newQuery(t, opts, s.opts) +} diff --git a/vendor/github.com/GoRethink/gorethink/utils.go b/vendor/github.com/GoRethink/gorethink/utils.go new file mode 100644 index 0000000..9da1e9a --- /dev/null +++ b/vendor/github.com/GoRethink/gorethink/utils.go @@ -0,0 +1,283 @@ +package gorethink + +import ( + "reflect" + "strconv" + "strings" + "sync/atomic" + + "gopkg.in/gorethink/gorethink.v2/encoding" + p "gopkg.in/gorethink/gorethink.v2/ql2" +) + +// Helper functions for constructing terms + +// constructRootTerm is an alias for creating a new term. +func constructRootTerm(name string, termType p.Term_TermType, args []interface{}, optArgs map[string]interface{}) Term { + return Term{ + name: name, + rootTerm: true, + termType: termType, + args: convertTermList(args), + optArgs: convertTermObj(optArgs), + } +} + +// constructMethodTerm is an alias for creating a new term. Unlike constructRootTerm +// this function adds the previous expression in the tree to the argument list to +// create a method term. +func constructMethodTerm(prevVal Term, name string, termType p.Term_TermType, args []interface{}, optArgs map[string]interface{}) Term { + args = append([]interface{}{prevVal}, args...) 
+ + return Term{ + name: name, + rootTerm: false, + termType: termType, + args: convertTermList(args), + optArgs: convertTermObj(optArgs), + } +} + +// Helper functions for creating internal RQL types + +func newQuery(t Term, qopts map[string]interface{}, copts *ConnectOpts) (q Query, err error) { + queryOpts := map[string]interface{}{} + for k, v := range qopts { + queryOpts[k], err = Expr(v).Build() + if err != nil { + return + } + } + if copts.Database != "" { + queryOpts["db"], err = DB(copts.Database).Build() + if err != nil { + return + } + } + + builtTerm, err := t.Build() + if err != nil { + return q, err + } + + // Construct query + return Query{ + Type: p.Query_START, + Term: &t, + Opts: queryOpts, + builtTerm: builtTerm, + }, nil +} + +// makeArray takes a slice of terms and produces a single MAKE_ARRAY term +func makeArray(args termsList) Term { + return Term{ + name: "[...]", + termType: p.Term_MAKE_ARRAY, + args: args, + } +} + +// makeObject takes a map of terms and produces a single MAKE_OBJECT term +func makeObject(args termsObj) Term { + return Term{ + name: "{...}", + termType: p.Term_MAKE_OBJ, + optArgs: args, + } +} + +var nextVarID int64 + +func makeFunc(f interface{}) Term { + value := reflect.ValueOf(f) + valueType := value.Type() + + var argNums = make([]interface{}, valueType.NumIn()) + var args = make([]reflect.Value, valueType.NumIn()) + for i := 0; i < valueType.NumIn(); i++ { + // Get a slice of the VARs to use as the function arguments + varID := atomic.AddInt64(&nextVarID, 1) + args[i] = reflect.ValueOf(constructRootTerm("var", p.Term_VAR, []interface{}{varID}, map[string]interface{}{})) + argNums[i] = varID + + // make sure all input arguments are of type Term + argValueTypeName := valueType.In(i).String() + if argValueTypeName != "gorethink.Term" && argValueTypeName != "interface {}" { + panic("Function argument is not of type Term or interface {}") + } + } + + if valueType.NumOut() != 1 { + panic("Function does not have a single 
return value") + } + + body := value.Call(args)[0].Interface() + argsArr := makeArray(convertTermList(argNums)) + + return constructRootTerm("func", p.Term_FUNC, []interface{}{argsArr, body}, map[string]interface{}{}) +} + +func funcWrap(value interface{}) Term { + val := Expr(value) + + if implVarScan(val) && val.termType != p.Term_ARGS { + return makeFunc(func(x Term) Term { + return val + }) + } + return val +} + +func funcWrapArgs(args []interface{}) []interface{} { + for i, arg := range args { + args[i] = funcWrap(arg) + } + + return args +} + +// implVarScan recursivly checks a value to see if it contains an +// IMPLICIT_VAR term. If it does it returns true +func implVarScan(value Term) bool { + if value.termType == p.Term_IMPLICIT_VAR { + return true + } + for _, v := range value.args { + if implVarScan(v) { + return true + } + } + + for _, v := range value.optArgs { + if implVarScan(v) { + return true + } + } + + return false +} + +// Convert an opt args struct to a map. +func optArgsToMap(optArgs OptArgs) map[string]interface{} { + data, err := encode(optArgs) + + if err == nil && data != nil { + if m, ok := data.(map[string]interface{}); ok { + return m + } + } + + return map[string]interface{}{} +} + +// Convert a list into a slice of terms +func convertTermList(l []interface{}) termsList { + if len(l) == 0 { + return nil + } + + terms := make(termsList, len(l)) + for i, v := range l { + terms[i] = Expr(v) + } + + return terms +} + +// Convert a map into a map of terms +func convertTermObj(o map[string]interface{}) termsObj { + if len(o) == 0 { + return nil + } + + terms := make(termsObj, len(o)) + for k, v := range o { + terms[k] = Expr(v) + } + + return terms +} + +// Helper functions for debugging + +func allArgsToStringSlice(args termsList, optArgs termsObj) []string { + allArgs := make([]string, len(args)+len(optArgs)) + i := 0 + + for _, v := range args { + allArgs[i] = v.String() + i++ + } + for k, v := range optArgs { + allArgs[i] = k + "=" + 
v.String() + i++ + } + + return allArgs +} + +func argsToStringSlice(args termsList) []string { + allArgs := make([]string, len(args)) + + for i, v := range args { + allArgs[i] = v.String() + } + + return allArgs +} + +func optArgsToStringSlice(optArgs termsObj) []string { + allArgs := make([]string, len(optArgs)) + i := 0 + + for k, v := range optArgs { + allArgs[i] = k + "=" + v.String() + i++ + } + + return allArgs +} + +func splitAddress(address string) (hostname string, port int) { + hostname = "localhost" + port = 28015 + + addrParts := strings.Split(address, ":") + + if len(addrParts) >= 1 { + hostname = addrParts[0] + } + if len(addrParts) >= 2 { + port, _ = strconv.Atoi(addrParts[1]) + } + + return +} + +func encode(data interface{}) (interface{}, error) { + if _, ok := data.(Term); ok { + return data, nil + } + + v, err := encoding.Encode(data) + if err != nil { + return nil, err + } + + return v, nil +} + +// shouldRetryQuery checks the result of a query and returns true if the query +// should be retried +func shouldRetryQuery(q Query, err error) bool { + if err == nil { + return false + } + + if _, ok := err.(RQLConnectionError); ok { + return true + } + + return err == ErrConnectionClosed +} diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go new file mode 100644 index 0000000..b4c9e84 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go @@ -0,0 +1,64 @@ +package logrus + +// The following code was sourced and modified from the +// https://bitbucket.org/tebeka/atexit package governed by the following license: +// +// Copyright (c) 2012 Miki Tebeka . 
+// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +// the Software, and to permit persons to whom the Software is furnished to do so, +// subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import ( + "fmt" + "os" +) + +var handlers = []func(){} + +func runHandler(handler func()) { + defer func() { + if err := recover(); err != nil { + fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err) + } + }() + + handler() +} + +func runHandlers() { + for _, handler := range handlers { + runHandler(handler) + } +} + +// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code) +func Exit(code int) { + runHandlers() + os.Exit(code) +} + +// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke +// all handlers. The handlers will also be invoked when any Fatal log entry is +// made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. 
+func RegisterExitHandler(handler func()) { + handlers = append(handlers, handler) +} diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go new file mode 100644 index 0000000..dddd5f8 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/doc.go @@ -0,0 +1,26 @@ +/* +Package logrus is a structured logger for Go, completely API compatible with the standard library logger. + + +The simplest way to use Logrus is simply the package-level exported logger: + + package main + + import ( + log "github.com/Sirupsen/logrus" + ) + + func main() { + log.WithFields(log.Fields{ + "animal": "walrus", + "number": 1, + "size": 10, + }).Info("A walrus appears") + } + +Output: + time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 + +For a full guide visit https://github.com/Sirupsen/logrus +*/ +package logrus diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go new file mode 100644 index 0000000..4edbe7a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/entry.go @@ -0,0 +1,275 @@ +package logrus + +import ( + "bytes" + "fmt" + "os" + "sync" + "time" +) + +var bufferPool *sync.Pool + +func init() { + bufferPool = &sync.Pool{ + New: func() interface{} { + return new(bytes.Buffer) + }, + } +} + +// Defines the key when adding errors using WithError. +var ErrorKey = "error" + +// An entry is the final or intermediate Logrus logging entry. It contains all +// the fields passed with WithField{,s}. It's finally logged when Debug, Info, +// Warn, Error, Fatal or Panic is called on it. These objects can be reused and +// passed around as much as you wish to avoid field duplication. +type Entry struct { + Logger *Logger + + // Contains all the fields set by the user. 
+ Data Fields + + // Time at which the log entry was created + Time time.Time + + // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + Level Level + + // Message passed to Debug, Info, Warn, Error, Fatal or Panic + Message string + + // When formatter is called in entry.log(), an Buffer may be set to entry + Buffer *bytes.Buffer +} + +func NewEntry(logger *Logger) *Entry { + return &Entry{ + Logger: logger, + // Default is three fields, give a little extra room + Data: make(Fields, 5), + } +} + +// Returns the string representation from the reader and ultimately the +// formatter. +func (entry *Entry) String() (string, error) { + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + return "", err + } + str := string(serialized) + return str, nil +} + +// Add an error as single field (using the key defined in ErrorKey) to the Entry. +func (entry *Entry) WithError(err error) *Entry { + return entry.WithField(ErrorKey, err) +} + +// Add a single field to the Entry. +func (entry *Entry) WithField(key string, value interface{}) *Entry { + return entry.WithFields(Fields{key: value}) +} + +// Add a map of fields to the Entry. 
+func (entry *Entry) WithFields(fields Fields) *Entry { + data := make(Fields, len(entry.Data)+len(fields)) + for k, v := range entry.Data { + data[k] = v + } + for k, v := range fields { + data[k] = v + } + return &Entry{Logger: entry.Logger, Data: data} +} + +// This function is not declared with a pointer value because otherwise +// race conditions will occur when using multiple goroutines +func (entry Entry) log(level Level, msg string) { + var buffer *bytes.Buffer + entry.Time = time.Now() + entry.Level = level + entry.Message = msg + + if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) + entry.Logger.mu.Unlock() + } + buffer = bufferPool.Get().(*bytes.Buffer) + buffer.Reset() + defer bufferPool.Put(buffer) + entry.Buffer = buffer + serialized, err := entry.Logger.Formatter.Format(&entry) + entry.Buffer = nil + if err != nil { + entry.Logger.mu.Lock() + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + entry.Logger.mu.Unlock() + } else { + entry.Logger.mu.Lock() + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + entry.Logger.mu.Unlock() + } + + // To avoid Entry#log() returning a value that only would make sense for + // panic() to use in Entry#Panic(), we avoid the allocation by checking + // directly here. + if level <= PanicLevel { + panic(&entry) + } +} + +func (entry *Entry) Debug(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.log(DebugLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Print(args ...interface{}) { + entry.Info(args...) 
+} + +func (entry *Entry) Info(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.log(InfoLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warn(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.log(WarnLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Warning(args ...interface{}) { + entry.Warn(args...) +} + +func (entry *Entry) Error(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.log(ErrorLevel, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Fatal(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.log(FatalLevel, fmt.Sprint(args...)) + } + Exit(1) +} + +func (entry *Entry) Panic(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.log(PanicLevel, fmt.Sprint(args...)) + } + panic(fmt.Sprint(args...)) +} + +// Entry Printf family functions + +func (entry *Entry) Debugf(format string, args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Infof(format string, args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Printf(format string, args ...interface{}) { + entry.Infof(format, args...) +} + +func (entry *Entry) Warnf(format string, args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Warningf(format string, args ...interface{}) { + entry.Warnf(format, args...) 
+} + +func (entry *Entry) Errorf(format string, args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(fmt.Sprintf(format, args...)) + } +} + +func (entry *Entry) Fatalf(format string, args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(fmt.Sprintf(format, args...)) + } + Exit(1) +} + +func (entry *Entry) Panicf(format string, args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(fmt.Sprintf(format, args...)) + } +} + +// Entry Println family functions + +func (entry *Entry) Debugln(args ...interface{}) { + if entry.Logger.Level >= DebugLevel { + entry.Debug(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Infoln(args ...interface{}) { + if entry.Logger.Level >= InfoLevel { + entry.Info(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Println(args ...interface{}) { + entry.Infoln(args...) +} + +func (entry *Entry) Warnln(args ...interface{}) { + if entry.Logger.Level >= WarnLevel { + entry.Warn(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Warningln(args ...interface{}) { + entry.Warnln(args...) +} + +func (entry *Entry) Errorln(args ...interface{}) { + if entry.Logger.Level >= ErrorLevel { + entry.Error(entry.sprintlnn(args...)) + } +} + +func (entry *Entry) Fatalln(args ...interface{}) { + if entry.Logger.Level >= FatalLevel { + entry.Fatal(entry.sprintlnn(args...)) + } + Exit(1) +} + +func (entry *Entry) Panicln(args ...interface{}) { + if entry.Logger.Level >= PanicLevel { + entry.Panic(entry.sprintlnn(args...)) + } +} + +// Sprintlnn => Sprint no newline. This is to get the behavior of how +// fmt.Sprintln where spaces are always added between operands, regardless of +// their type. Instead of vendoring the Sprintln implementation to spare a +// string allocation, we do the simplest thing. +func (entry *Entry) sprintlnn(args ...interface{}) string { + msg := fmt.Sprintln(args...) 
+ return msg[:len(msg)-1] +} diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go new file mode 100644 index 0000000..9a0120a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/exported.go @@ -0,0 +1,193 @@ +package logrus + +import ( + "io" +) + +var ( + // std is the name of the standard logger in stdlib `log` + std = New() +) + +func StandardLogger() *Logger { + return std +} + +// SetOutput sets the standard logger output. +func SetOutput(out io.Writer) { + std.mu.Lock() + defer std.mu.Unlock() + std.Out = out +} + +// SetFormatter sets the standard logger formatter. +func SetFormatter(formatter Formatter) { + std.mu.Lock() + defer std.mu.Unlock() + std.Formatter = formatter +} + +// SetLevel sets the standard logger level. +func SetLevel(level Level) { + std.mu.Lock() + defer std.mu.Unlock() + std.Level = level +} + +// GetLevel returns the standard logger level. +func GetLevel() Level { + std.mu.Lock() + defer std.mu.Unlock() + return std.Level +} + +// AddHook adds a hook to the standard logger hooks. +func AddHook(hook Hook) { + std.mu.Lock() + defer std.mu.Unlock() + std.Hooks.Add(hook) +} + +// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. +func WithError(err error) *Entry { + return std.WithField(ErrorKey, err) +} + +// WithField creates an entry from the standard logger and adds a field to +// it. If you want multiple fields, use `WithFields`. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithField(key string, value interface{}) *Entry { + return std.WithField(key, value) +} + +// WithFields creates an entry from the standard logger and adds multiple +// fields to it. This is simply a helper for `WithField`, invoking it +// once for each field. 
+// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithFields(fields Fields) *Entry { + return std.WithFields(fields) +} + +// Debug logs a message at level Debug on the standard logger. +func Debug(args ...interface{}) { + std.Debug(args...) +} + +// Print logs a message at level Info on the standard logger. +func Print(args ...interface{}) { + std.Print(args...) +} + +// Info logs a message at level Info on the standard logger. +func Info(args ...interface{}) { + std.Info(args...) +} + +// Warn logs a message at level Warn on the standard logger. +func Warn(args ...interface{}) { + std.Warn(args...) +} + +// Warning logs a message at level Warn on the standard logger. +func Warning(args ...interface{}) { + std.Warning(args...) +} + +// Error logs a message at level Error on the standard logger. +func Error(args ...interface{}) { + std.Error(args...) +} + +// Panic logs a message at level Panic on the standard logger. +func Panic(args ...interface{}) { + std.Panic(args...) +} + +// Fatal logs a message at level Fatal on the standard logger. +func Fatal(args ...interface{}) { + std.Fatal(args...) +} + +// Debugf logs a message at level Debug on the standard logger. +func Debugf(format string, args ...interface{}) { + std.Debugf(format, args...) +} + +// Printf logs a message at level Info on the standard logger. +func Printf(format string, args ...interface{}) { + std.Printf(format, args...) +} + +// Infof logs a message at level Info on the standard logger. +func Infof(format string, args ...interface{}) { + std.Infof(format, args...) +} + +// Warnf logs a message at level Warn on the standard logger. +func Warnf(format string, args ...interface{}) { + std.Warnf(format, args...) +} + +// Warningf logs a message at level Warn on the standard logger. +func Warningf(format string, args ...interface{}) { + std.Warningf(format, args...) 
+} + +// Errorf logs a message at level Error on the standard logger. +func Errorf(format string, args ...interface{}) { + std.Errorf(format, args...) +} + +// Panicf logs a message at level Panic on the standard logger. +func Panicf(format string, args ...interface{}) { + std.Panicf(format, args...) +} + +// Fatalf logs a message at level Fatal on the standard logger. +func Fatalf(format string, args ...interface{}) { + std.Fatalf(format, args...) +} + +// Debugln logs a message at level Debug on the standard logger. +func Debugln(args ...interface{}) { + std.Debugln(args...) +} + +// Println logs a message at level Info on the standard logger. +func Println(args ...interface{}) { + std.Println(args...) +} + +// Infoln logs a message at level Info on the standard logger. +func Infoln(args ...interface{}) { + std.Infoln(args...) +} + +// Warnln logs a message at level Warn on the standard logger. +func Warnln(args ...interface{}) { + std.Warnln(args...) +} + +// Warningln logs a message at level Warn on the standard logger. +func Warningln(args ...interface{}) { + std.Warningln(args...) +} + +// Errorln logs a message at level Error on the standard logger. +func Errorln(args ...interface{}) { + std.Errorln(args...) +} + +// Panicln logs a message at level Panic on the standard logger. +func Panicln(args ...interface{}) { + std.Panicln(args...) +} + +// Fatalln logs a message at level Fatal on the standard logger. +func Fatalln(args ...interface{}) { + std.Fatalln(args...) +} diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go new file mode 100644 index 0000000..b5fbe93 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/formatter.go @@ -0,0 +1,45 @@ +package logrus + +import "time" + +const DefaultTimestampFormat = time.RFC3339 + +// The Formatter interface is used to implement a custom Formatter. It takes an +// `Entry`. It exposes all the fields, including the default ones: +// +// * `entry.Data["msg"]`. 
The message passed from Info, Warn, Error .. +// * `entry.Data["time"]`. The timestamp. +// * `entry.Data["level"]. The level the entry was logged at. +// +// Any additional fields added with `WithField` or `WithFields` are also in +// `entry.Data`. Format is expected to return an array of bytes which are then +// logged to `logger.Out`. +type Formatter interface { + Format(*Entry) ([]byte, error) +} + +// This is to not silently overwrite `time`, `msg` and `level` fields when +// dumping it. If this code wasn't there doing: +// +// logrus.WithField("level", 1).Info("hello") +// +// Would just silently drop the user provided level. Instead with this code +// it'll logged as: +// +// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} +// +// It's not exported because it's still using Data in an opinionated way. It's to +// avoid code duplication between the two default formatters. +func prefixFieldClashes(data Fields) { + if t, ok := data["time"]; ok { + data["fields.time"] = t + } + + if m, ok := data["msg"]; ok { + data["fields.msg"] = m + } + + if l, ok := data["level"]; ok { + data["fields.level"] = l + } +} diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go new file mode 100644 index 0000000..3f151cd --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/hooks.go @@ -0,0 +1,34 @@ +package logrus + +// A hook to be fired when logging on the logging levels returned from +// `Levels()` on your implementation of the interface. Note that this is not +// fired in a goroutine or a channel with workers, you should handle such +// functionality yourself if your call is non-blocking and you don't wish for +// the logging calls for levels returned from `Levels()` to block. +type Hook interface { + Levels() []Level + Fire(*Entry) error +} + +// Internal type for storing the hooks on a logger instance. +type LevelHooks map[Level][]Hook + +// Add a hook to an instance of logger. 
This is called with +// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. +func (hooks LevelHooks) Add(hook Hook) { + for _, level := range hook.Levels() { + hooks[level] = append(hooks[level], hook) + } +} + +// Fire all the hooks for the passed level. Used by `entry.log` to fire +// appropriate hooks for a log entry. +func (hooks LevelHooks) Fire(level Level, entry *Entry) error { + for _, hook := range hooks[level] { + if err := hook.Fire(entry); err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go new file mode 100644 index 0000000..266554e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go @@ -0,0 +1,74 @@ +package logrus + +import ( + "encoding/json" + "fmt" +) + +type fieldKey string +type FieldMap map[fieldKey]string + +const ( + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" +) + +func (f FieldMap) resolve(key fieldKey) string { + if k, ok := f[key]; ok { + return k + } + + return string(key) +} + +type JSONFormatter struct { + // TimestampFormat sets the format used for marshaling timestamps. + TimestampFormat string + + // DisableTimestamp allows disabling automatic timestamps in output + DisableTimestamp bool + + // FieldMap allows users to customize the names of keys for various fields. 
+ // As an example: + // formatter := &JSONFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyLevel: "@message", + // }, + // } + FieldMap FieldMap +} + +func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { + data := make(Fields, len(entry.Data)+3) + for k, v := range entry.Data { + switch v := v.(type) { + case error: + // Otherwise errors are ignored by `encoding/json` + // https://github.com/Sirupsen/logrus/issues/137 + data[k] = v.Error() + default: + data[k] = v + } + } + prefixFieldClashes(data) + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + + if !f.DisableTimestamp { + data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) + } + data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message + data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() + + serialized, err := json.Marshal(data) + if err != nil { + return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + } + return append(serialized, '\n'), nil +} diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go new file mode 100644 index 0000000..b769f3d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logger.go @@ -0,0 +1,308 @@ +package logrus + +import ( + "io" + "os" + "sync" +) + +type Logger struct { + // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a + // file, or leave it default which is `os.Stderr`. You can also set this to + // something more adventorous, such as logging to Kafka. + Out io.Writer + // Hooks for the logger instance. These allow firing events based on logging + // levels and log entries. For example, to send errors to an error tracking + // service, log to StatsD or dump the core on fatal errors. + Hooks LevelHooks + // All log entries pass through the formatter before logged to Out. 
The + // included formatters are `TextFormatter` and `JSONFormatter` for which + // TextFormatter is the default. In development (when a TTY is attached) it + // logs with colors, but to a file it wouldn't. You can easily implement your + // own that implements the `Formatter` interface, see the `README` or included + // formatters for examples. + Formatter Formatter + // The logging level the logger should log at. This is typically (and defaults + // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be + // logged. `logrus.Debug` is useful in + Level Level + // Used to sync writing to the log. Locking is enabled by Default + mu MutexWrap + // Reusable empty entry + entryPool sync.Pool +} + +type MutexWrap struct { + lock sync.Mutex + disabled bool +} + +func (mw *MutexWrap) Lock() { + if !mw.disabled { + mw.lock.Lock() + } +} + +func (mw *MutexWrap) Unlock() { + if !mw.disabled { + mw.lock.Unlock() + } +} + +func (mw *MutexWrap) Disable() { + mw.disabled = true +} + +// Creates a new logger. Configuration should be set by changing `Formatter`, +// `Out` and `Hooks` directly on the default logger instance. You can also just +// instantiate your own: +// +// var log = &Logger{ +// Out: os.Stderr, +// Formatter: new(JSONFormatter), +// Hooks: make(LevelHooks), +// Level: logrus.DebugLevel, +// } +// +// It's recommended to make this a global instance called `log`. +func New() *Logger { + return &Logger{ + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + } +} + +func (logger *Logger) newEntry() *Entry { + entry, ok := logger.entryPool.Get().(*Entry) + if ok { + return entry + } + return NewEntry(logger) +} + +func (logger *Logger) releaseEntry(entry *Entry) { + logger.entryPool.Put(entry) +} + +// Adds a field to the log entry, note that it doesn't log until you call +// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// If you want multiple fields, use `WithFields`. 
+func (logger *Logger) WithField(key string, value interface{}) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithField(key, value) +} + +// Adds a struct of fields to the log entry. All it does is call `WithField` for +// each `Field`. +func (logger *Logger) WithFields(fields Fields) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithFields(fields) +} + +// Add an error as single field to the log entry. All it does is call +// `WithError` for the given `error`. +func (logger *Logger) WithError(err error) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithError(err) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debugf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infof(format string, args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Infof(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Printf(format string, args ...interface{}) { + entry := logger.newEntry() + entry.Printf(format, args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningf(format string, args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorf(format string, args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorf(format, args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalf(format string, args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalf(format, args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicf(format string, args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicf(format, args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debug(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debug(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Info(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Print(args ...interface{}) { + entry := logger.newEntry() + entry.Info(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warn(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warning(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warn(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Error(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Error(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatal(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatal(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panic(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panic(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Debugln(args ...interface{}) { + if logger.Level >= DebugLevel { + entry := logger.newEntry() + entry.Debugln(args...) 
+ logger.releaseEntry(entry) + } +} + +func (logger *Logger) Infoln(args ...interface{}) { + if logger.Level >= InfoLevel { + entry := logger.newEntry() + entry.Infoln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Println(args ...interface{}) { + entry := logger.newEntry() + entry.Println(args...) + logger.releaseEntry(entry) +} + +func (logger *Logger) Warnln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Warningln(args ...interface{}) { + if logger.Level >= WarnLevel { + entry := logger.newEntry() + entry.Warnln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Errorln(args ...interface{}) { + if logger.Level >= ErrorLevel { + entry := logger.newEntry() + entry.Errorln(args...) + logger.releaseEntry(entry) + } +} + +func (logger *Logger) Fatalln(args ...interface{}) { + if logger.Level >= FatalLevel { + entry := logger.newEntry() + entry.Fatalln(args...) + logger.releaseEntry(entry) + } + Exit(1) +} + +func (logger *Logger) Panicln(args ...interface{}) { + if logger.Level >= PanicLevel { + entry := logger.newEntry() + entry.Panicln(args...) + logger.releaseEntry(entry) + } +} + +//When file is opened with appending mode, it's safe to +//write concurrently to a file (within 4k message on Linux). +//In these cases user can choose to disable the lock. +func (logger *Logger) SetNoLock() { + logger.mu.Disable() +} diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go new file mode 100644 index 0000000..e596691 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/logrus.go @@ -0,0 +1,143 @@ +package logrus + +import ( + "fmt" + "log" + "strings" +) + +// Fields type, used to pass to `WithFields`. +type Fields map[string]interface{} + +// Level type +type Level uint8 + +// Convert the Level to a string. E.g. PanicLevel becomes "panic". 
+func (level Level) String() string { + switch level { + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warning" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + } + + return "unknown" +} + +// ParseLevel takes a string level and returns the Logrus log level constant. +func ParseLevel(lvl string) (Level, error) { + switch strings.ToLower(lvl) { + case "panic": + return PanicLevel, nil + case "fatal": + return FatalLevel, nil + case "error": + return ErrorLevel, nil + case "warn", "warning": + return WarnLevel, nil + case "info": + return InfoLevel, nil + case "debug": + return DebugLevel, nil + } + + var l Level + return l, fmt.Errorf("not a valid logrus Level: %q", lvl) +} + +// A constant exposing all logging levels +var AllLevels = []Level{ + PanicLevel, + FatalLevel, + ErrorLevel, + WarnLevel, + InfoLevel, + DebugLevel, +} + +// These are the different logging levels. You can set the logging level to log +// on your instance of logger, obtained with `logrus.New()`. +const ( + // PanicLevel level, highest level of severity. Logs and then calls panic with the + // message passed to Debug, Info, ... + PanicLevel Level = iota + // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // logging level is set to Panic. + FatalLevel + // ErrorLevel level. Logs. Used for errors that should definitely be noted. + // Commonly used for hooks to send errors to an error tracking service. + ErrorLevel + // WarnLevel level. Non-critical entries that deserve eyes. + WarnLevel + // InfoLevel level. General operational entries about what's going on inside the + // application. + InfoLevel + // DebugLevel level. Usually only enabled when debugging. Very verbose logging. 
+ DebugLevel +) + +// Won't compile if StdLogger can't be realized by a log.Logger +var ( + _ StdLogger = &log.Logger{} + _ StdLogger = &Entry{} + _ StdLogger = &Logger{} +) + +// StdLogger is what your logrus-enabled library should take, that way +// it'll accept a stdlib logger and a logrus logger. There's no standard +// interface, this is the closest we get, unfortunately. +type StdLogger interface { + Print(...interface{}) + Printf(string, ...interface{}) + Println(...interface{}) + + Fatal(...interface{}) + Fatalf(string, ...interface{}) + Fatalln(...interface{}) + + Panic(...interface{}) + Panicf(string, ...interface{}) + Panicln(...interface{}) +} + +// The FieldLogger interface generalizes the Entry and Logger types +type FieldLogger interface { + WithField(key string, value interface{}) *Entry + WithFields(fields Fields) *Entry + WithError(err error) *Entry + + Debugf(format string, args ...interface{}) + Infof(format string, args ...interface{}) + Printf(format string, args ...interface{}) + Warnf(format string, args ...interface{}) + Warningf(format string, args ...interface{}) + Errorf(format string, args ...interface{}) + Fatalf(format string, args ...interface{}) + Panicf(format string, args ...interface{}) + + Debug(args ...interface{}) + Info(args ...interface{}) + Print(args ...interface{}) + Warn(args ...interface{}) + Warning(args ...interface{}) + Error(args ...interface{}) + Fatal(args ...interface{}) + Panic(args ...interface{}) + + Debugln(args ...interface{}) + Infoln(args ...interface{}) + Println(args ...interface{}) + Warnln(args ...interface{}) + Warningln(args ...interface{}) + Errorln(args ...interface{}) + Fatalln(args ...interface{}) + Panicln(args ...interface{}) +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go new file mode 100644 index 0000000..1960169 --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go @@ -0,0 +1,8 @@ +// 
+build appengine + +package logrus + +// IsTerminal returns true if stderr's file descriptor is a terminal. +func IsTerminal() bool { + return true +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go new file mode 100644 index 0000000..5f6be4d --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go @@ -0,0 +1,10 @@ +// +build darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TIOCGETA + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go new file mode 100644 index 0000000..308160c --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go @@ -0,0 +1,14 @@ +// Based on ssh/terminal: +// Copyright 2013 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine + +package logrus + +import "syscall" + +const ioctlReadTermios = syscall.TCGETS + +type Termios syscall.Termios diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go new file mode 100644 index 0000000..329038f --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go @@ -0,0 +1,22 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build linux darwin freebsd openbsd netbsd dragonfly +// +build !appengine + +package logrus + +import ( + "syscall" + "unsafe" +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
+func IsTerminal() bool { + fd := syscall.Stderr + var termios Termios + _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) + return err == 0 +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go new file mode 100644 index 0000000..a3c6f6e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go @@ -0,0 +1,15 @@ +// +build solaris,!appengine + +package logrus + +import ( + "os" + + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. +func IsTerminal() bool { + _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go new file mode 100644 index 0000000..3727e8a --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go @@ -0,0 +1,27 @@ +// Based on ssh/terminal: +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build windows,!appengine + +package logrus + +import ( + "syscall" + "unsafe" +) + +var kernel32 = syscall.NewLazyDLL("kernel32.dll") + +var ( + procGetConsoleMode = kernel32.NewProc("GetConsoleMode") +) + +// IsTerminal returns true if stderr's file descriptor is a terminal. 
+func IsTerminal() bool { + fd := syscall.Stderr + var st uint32 + r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) + return r != 0 && e == 0 +} diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go new file mode 100644 index 0000000..20f2d7e --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go @@ -0,0 +1,170 @@ +package logrus + +import ( + "bytes" + "fmt" + "runtime" + "sort" + "strings" + "time" +) + +const ( + nocolor = 0 + red = 31 + green = 32 + yellow = 33 + blue = 34 + gray = 37 +) + +var ( + baseTimestamp time.Time + isTerminal bool +) + +func init() { + baseTimestamp = time.Now() + isTerminal = IsTerminal() +} + +func miniTS() int { + return int(time.Since(baseTimestamp) / time.Second) +} + +type TextFormatter struct { + // Set to true to bypass checking for a TTY before outputting colors. + ForceColors bool + + // Force disabling colors. + DisableColors bool + + // Disable timestamp logging. useful when output is redirected to logging + // system that already adds timestamps. + DisableTimestamp bool + + // Enable logging the full timestamp when a TTY is attached instead of just + // the time passed since beginning of execution. + FullTimestamp bool + + // TimestampFormat to use for display when a full timestamp is printed + TimestampFormat string + + // The fields are sorted by default for a consistent output. For applications + // that log extremely frequently and don't use the JSON formatter this may not + // be desired. 
+ DisableSorting bool +} + +func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { + var b *bytes.Buffer + var keys []string = make([]string, 0, len(entry.Data)) + for k := range entry.Data { + keys = append(keys, k) + } + + if !f.DisableSorting { + sort.Strings(keys) + } + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + prefixFieldClashes(entry.Data) + + isColorTerminal := isTerminal && (runtime.GOOS != "windows") + isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors + + timestampFormat := f.TimestampFormat + if timestampFormat == "" { + timestampFormat = DefaultTimestampFormat + } + if isColored { + f.printColored(b, entry, keys, timestampFormat) + } else { + if !f.DisableTimestamp { + f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) + } + f.appendKeyValue(b, "level", entry.Level.String()) + if entry.Message != "" { + f.appendKeyValue(b, "msg", entry.Message) + } + for _, key := range keys { + f.appendKeyValue(b, key, entry.Data[key]) + } + } + + b.WriteByte('\n') + return b.Bytes(), nil +} + +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { + var levelColor int + switch entry.Level { + case DebugLevel: + levelColor = gray + case WarnLevel: + levelColor = yellow + case ErrorLevel, FatalLevel, PanicLevel: + levelColor = red + default: + levelColor = blue + } + + levelText := strings.ToUpper(entry.Level.String())[0:4] + + if f.DisableTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + } else if !f.FullTimestamp { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) + } else { + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + } + for _, k := range keys { + v := entry.Data[k] + fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) + f.appendValue(b, v) + } +} + +func 
needsQuoting(text string) bool { + for _, ch := range text { + if !((ch >= 'a' && ch <= 'z') || + (ch >= 'A' && ch <= 'Z') || + (ch >= '0' && ch <= '9') || + ch == '-' || ch == '.') { + return true + } + } + return false +} + +func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { + + b.WriteString(key) + b.WriteByte('=') + f.appendValue(b, value) + b.WriteByte(' ') +} + +func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) { + switch value := value.(type) { + case string: + if !needsQuoting(value) { + b.WriteString(value) + } else { + fmt.Fprintf(b, "%q", value) + } + case error: + errmsg := value.Error() + if !needsQuoting(errmsg) { + b.WriteString(errmsg) + } else { + fmt.Fprintf(b, "%q", errmsg) + } + default: + fmt.Fprint(b, value) + } +} diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go new file mode 100644 index 0000000..f74d2aa --- /dev/null +++ b/vendor/github.com/Sirupsen/logrus/writer.go @@ -0,0 +1,53 @@ +package logrus + +import ( + "bufio" + "io" + "runtime" +) + +func (logger *Logger) Writer() *io.PipeWriter { + return logger.WriterLevel(InfoLevel) +} + +func (logger *Logger) WriterLevel(level Level) *io.PipeWriter { + reader, writer := io.Pipe() + + var printFunc func(args ...interface{}) + switch level { + case DebugLevel: + printFunc = logger.Debug + case InfoLevel: + printFunc = logger.Info + case WarnLevel: + printFunc = logger.Warn + case ErrorLevel: + printFunc = logger.Error + case FatalLevel: + printFunc = logger.Fatal + case PanicLevel: + printFunc = logger.Panic + default: + printFunc = logger.Print + } + + go logger.writerScanner(reader, printFunc) + runtime.SetFinalizer(writer, writerFinalizer) + + return writer +} + +func (logger *Logger) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) { + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + printFunc(scanner.Text()) + } + if err := scanner.Err(); 
err != nil { + logger.Errorf("Error while reading from Writer: %s", err) + } + reader.Close() +} + +func writerFinalizer(writer *io.PipeWriter) { + writer.Close() +} diff --git a/vendor/github.com/Xe/uuid/dce.go b/vendor/github.com/Xe/uuid/dce.go new file mode 100644 index 0000000..50a0f2d --- /dev/null +++ b/vendor/github.com/Xe/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. 
+func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. +func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/Xe/uuid/doc.go b/vendor/github.com/Xe/uuid/doc.go new file mode 100644 index 0000000..d8bd013 --- /dev/null +++ b/vendor/github.com/Xe/uuid/doc.go @@ -0,0 +1,8 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +package uuid diff --git a/vendor/github.com/Xe/uuid/hash.go b/vendor/github.com/Xe/uuid/hash.go new file mode 100644 index 0000000..cdd4192 --- /dev/null +++ b/vendor/github.com/Xe/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID dervied from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. 
The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. +func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/Xe/uuid/node.go b/vendor/github.com/Xe/uuid/node.go new file mode 100644 index 0000000..dd0a8ac --- /dev/null +++ b/vendor/github.com/Xe/uuid/node.go @@ -0,0 +1,101 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "net" + +var ( + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. 
+// +// SetNodeInterface never fails when name is "". +func SetNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + if nodeID == nil { + SetNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/vendor/github.com/Xe/uuid/time.go b/vendor/github.com/Xe/uuid/time.go new file mode 100644 index 0000000..b9369c2 --- /dev/null +++ b/vendor/github.com/Xe/uuid/time.go @@ -0,0 +1,132 @@ +// Copyright 2014 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + mu sync.Mutex + lasttime uint64 // last time we returned + clock_seq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// adjusts the clock sequence as needed. An error is returned if the current +// time cannot be determined. +func GetTime() (Time, error) { + defer mu.Unlock() + mu.Lock() + return getTime() +} + +func getTime() (Time, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clock_seq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. 
Unless SetClockSequence a new random +// clock sequence is generated the first time a clock sequence is requested by +// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated +// for +func ClockSequence() int { + defer mu.Unlock() + mu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clock_seq == 0 { + setClockSequence(-1) + } + return int(clock_seq & 0x3fff) +} + +// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer mu.Unlock() + mu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + old_seq := clock_seq + clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if old_seq != clock_seq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. It returns false if uuid is not valid. The time is only well defined +// for version 1 and 2 UUIDs. +func (uuid UUID) Time() (Time, bool) { + if len(uuid) != 16 { + return 0, false + } + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time), true +} + +// ClockSequence returns the clock sequence encoded in uuid. It returns false +// if uuid is not valid. The clock sequence is only well defined for version 1 +// and 2 UUIDs. +func (uuid UUID) ClockSequence() (int, bool) { + if len(uuid) != 16 { + return 0, false + } + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true +} diff --git a/vendor/github.com/Xe/uuid/util.go b/vendor/github.com/Xe/uuid/util.go new file mode 100644 index 0000000..de40b10 --- /dev/null +++ b/vendor/github.com/Xe/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2011 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = []byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts the the first two hex bytes of x into a byte. 
+func xtob(x string) (byte, bool) { + b1 := xvalues[x[0]] + b2 := xvalues[x[1]] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/Xe/uuid/uuid.go b/vendor/github.com/Xe/uuid/uuid.go new file mode 100644 index 0000000..2920fae --- /dev/null +++ b/vendor/github.com/Xe/uuid/uuid.go @@ -0,0 +1,163 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID []byte + +// A Version represents a UUIDs version. +type Version byte + +// A Variant represents a UUIDs variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. 
+func Parse(s string) UUID { + if len(s) == 36+9 { + if strings.ToLower(s[:9]) != "urn:uuid:" { + return nil + } + s = s[9:] + } else if len(s) != 36 { + return nil + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return nil + } + uuid := make([]byte, 16) + for i, x := range []int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid +} + +// Equal returns true if uuid1 and uuid2 are equal. +func Equal(uuid1, uuid2 UUID) bool { + return bytes.Equal(uuid1, uuid2) +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// Variant returns the variant encoded in uuid. It returns Invalid if +// uuid is invalid. +func (uuid UUID) Variant() Variant { + if len(uuid) != 16 { + return Invalid + } + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } + panic("unreachable") +} + +// Version returns the verison of uuid. It returns false if uuid is not +// valid. 
+func (uuid UUID) Version() (Version, bool) {
+	if len(uuid) != 16 {
+		return 0, false
+	}
+	return Version(uuid[6] >> 4), true
+}
+
+func (v Version) String() string {
+	if v > 15 {
+		return fmt.Sprintf("BAD_VERSION_%d", v)
+	}
+	return fmt.Sprintf("VERSION_%d", v)
+}
+
+func (v Variant) String() string {
+	switch v {
+	case RFC4122:
+		return "RFC4122"
+	case Reserved:
+		return "Reserved"
+	case Microsoft:
+		return "Microsoft"
+	case Future:
+		return "Future"
+	case Invalid:
+		return "Invalid"
+	}
+	return fmt.Sprintf("BadVariant%d", int(v))
+}
+
+// SetRand sets the random number generator to r, which implements io.Reader.
+// If r.Read returns an error when the package requests random data then
+// a panic will be issued.
+//
+// Calling SetRand with nil sets the random number generator to the default
+// generator.
+func SetRand(r io.Reader) {
+	if r == nil {
+		rander = rand.Reader
+		return
+	}
+	rander = r
+}
diff --git a/vendor/github.com/Xe/uuid/version1.go b/vendor/github.com/Xe/uuid/version1.go
new file mode 100644
index 0000000..6358004
--- /dev/null
+++ b/vendor/github.com/Xe/uuid/version1.go
@@ -0,0 +1,41 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+import (
+	"encoding/binary"
+)
+
+// NewUUID returns a Version 1 UUID based on the current NodeID and clock
+// sequence, and the current time. If the NodeID has not been set by SetNodeID
+// or SetNodeInterface then it will be set automatically. If the NodeID cannot
+// be set NewUUID returns nil. If clock sequence has not been set by
+// SetClockSequence then it will be set automatically. If GetTime fails to
+// return the current time, NewUUID returns nil.
+func NewUUID() UUID {
+	if nodeID == nil {
+		SetNodeInterface("")
+	}
+
+	now, err := GetTime()
+	if err != nil {
+		return nil
+	}
+
+	uuid := make([]byte, 16)
+
+	time_low := uint32(now & 0xffffffff)
+	time_mid := uint16((now >> 32) & 0xffff)
+	time_hi := uint16((now >> 48) & 0x0fff)
+	time_hi |= 0x1000 // Version 1
+
+	binary.BigEndian.PutUint32(uuid[0:], time_low)
+	binary.BigEndian.PutUint16(uuid[4:], time_mid)
+	binary.BigEndian.PutUint16(uuid[6:], time_hi)
+	binary.BigEndian.PutUint16(uuid[8:], clock_seq)
+	copy(uuid[10:], nodeID)
+
+	return uuid
+}
diff --git a/vendor/github.com/Xe/uuid/version4.go b/vendor/github.com/Xe/uuid/version4.go
new file mode 100644
index 0000000..b3d4a36
--- /dev/null
+++ b/vendor/github.com/Xe/uuid/version4.go
@@ -0,0 +1,25 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package uuid
+
+// Random returns a Random (Version 4) UUID or panics.
+//
+// The strength of the UUIDs is based on the strength of the crypto/rand
+// package.
+//
+// A note about uniqueness derived from the UUID Wikipedia entry:
+//
+// Randomly generated UUIDs have 122 random bits. One's annual risk of being
+// hit by a meteorite is estimated to be one chance in 17 billion, that
+// means the probability is about 0.00000000006 (6 × 10−11),
+// equivalent to the odds of creating a few tens of trillions of UUIDs in a
+// year and having one duplicate.
+func NewRandom() UUID {
+	uuid := make([]byte, 16)
+	randomBits([]byte(uuid))
+	uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
+	uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
+	return uuid
+}
diff --git a/vendor/github.com/Yawning/bulb/cmd_authenticate.go b/vendor/github.com/Yawning/bulb/cmd_authenticate.go
new file mode 100644
index 0000000..aba7bd6
--- /dev/null
+++ b/vendor/github.com/Yawning/bulb/cmd_authenticate.go
@@ -0,0 +1,137 @@
+// cmd_authenticate.go - AUTHENTICATE/AUTHCHALLENGE commands.
+//
+// To the extent possible under law, Yawning Angel waived all copyright
+// and related or neighboring rights to bulb, using the creative
+// commons "cc0" public domain dedication. See LICENSE or
+// for full details.
+
+package bulb
+
+import (
+	"crypto/hmac"
+	"crypto/rand"
+	"crypto/sha256"
+	"encoding/hex"
+	"io/ioutil"
+	"strings"
+)
+
+// Authenticate authenticates with the Tor instance using the "best" possible
+// authentication method. The password argument is entirely optional, and will
+// only be used if the "SAFECOOKIE" and "NULL" authentication methods are not
+// available and "HASHEDPASSWORD" is.
+func (c *Conn) Authenticate(password string) error {
+	if c.isAuthenticated {
+		return nil
+	}
+
+	// Determine the supported authentication methods, and the cookie path.
+	pi, err := c.ProtocolInfo()
+	if err != nil {
+		return err
+	}
+
+	// "COOKIE" authentication exists, but anything modern supports
+	// "SAFECOOKIE".
+ const ( + cmdAuthenticate = "AUTHENTICATE" + authMethodNull = "NULL" + authMethodPassword = "HASHEDPASSWORD" + authMethodSafeCookie = "SAFECOOKIE" + ) + if pi.AuthMethods[authMethodNull] { + _, err = c.Request(cmdAuthenticate) + c.isAuthenticated = err == nil + return err + } else if pi.AuthMethods[authMethodSafeCookie] { + const ( + authCookieLength = 32 + authNonceLength = 32 + authHashLength = 32 + + authServerHashKey = "Tor safe cookie authentication server-to-controller hash" + authClientHashKey = "Tor safe cookie authentication controller-to-server hash" + ) + + if pi.CookieFile == "" { + return newProtocolError("invalid (empty) COOKIEFILE") + } + cookie, err := ioutil.ReadFile(pi.CookieFile) + if err != nil { + return newProtocolError("failed to read COOKIEFILE: %v", err) + } else if len(cookie) != authCookieLength { + return newProtocolError("invalid cookie file length: %d", len(cookie)) + } + + // Send an AUTHCHALLENGE command, and parse the response. + var clientNonce [authNonceLength]byte + if _, err := rand.Read(clientNonce[:]); err != nil { + return newProtocolError("failed to generate clientNonce: %v", err) + } + clientNonceStr := hex.EncodeToString(clientNonce[:]) + resp, err := c.Request("AUTHCHALLENGE %s %s", authMethodSafeCookie, clientNonceStr) + if err != nil { + return err + } + splitResp := strings.Split(resp.Reply, " ") + if len(splitResp) != 3 { + return newProtocolError("invalid AUTHCHALLENGE response") + } + serverHashStr := strings.TrimPrefix(splitResp[1], "SERVERHASH=") + if serverHashStr == splitResp[1] { + return newProtocolError("missing SERVERHASH") + } + serverHash, err := hex.DecodeString(serverHashStr) + if err != nil { + return newProtocolError("failed to decode ServerHash: %v", err) + } + if len(serverHash) != authHashLength { + return newProtocolError("invalid ServerHash length: %d", len(serverHash)) + } + serverNonceStr := strings.TrimPrefix(splitResp[2], "SERVERNONCE=") + if serverNonceStr == splitResp[2] { + return 
newProtocolError("missing SERVERNONCE") + } + serverNonce, err := hex.DecodeString(serverNonceStr) + if err != nil { + return newProtocolError("failed to decode ServerNonce: %v", err) + } + if len(serverNonce) != authNonceLength { + return newProtocolError("invalid ServerNonce length: %d", len(serverNonce)) + } + + // Validate the ServerHash. + m := hmac.New(sha256.New, []byte(authServerHashKey)) + m.Write(cookie) + m.Write(clientNonce[:]) + m.Write(serverNonce) + dervServerHash := m.Sum(nil) + if !hmac.Equal(serverHash, dervServerHash) { + return newProtocolError("invalid ServerHash: mismatch") + } + + // Calculate the ClientHash, and issue the AUTHENTICATE. + m = hmac.New(sha256.New, []byte(authClientHashKey)) + m.Write(cookie) + m.Write(clientNonce[:]) + m.Write(serverNonce) + clientHash := m.Sum(nil) + clientHashStr := hex.EncodeToString(clientHash) + + _, err = c.Request("%s %s", cmdAuthenticate, clientHashStr) + c.isAuthenticated = err == nil + return err + } else if pi.AuthMethods[authMethodPassword] { + // Despite the name HASHEDPASSWORD, the raw password is actually sent. + // According to the code, this can either be a QuotedString, or base16 + // encoded, so go with the later since it's easier to handle. + if password == "" { + return newProtocolError("password auth needs a password") + } + passwordStr := hex.EncodeToString([]byte(password)) + _, err = c.Request("%s %s", cmdAuthenticate, passwordStr) + c.isAuthenticated = err == nil + return err + } + return newProtocolError("no supported authentication methods") +} diff --git a/vendor/github.com/Yawning/bulb/cmd_onion.go b/vendor/github.com/Yawning/bulb/cmd_onion.go new file mode 100644 index 0000000..2c62417 --- /dev/null +++ b/vendor/github.com/Yawning/bulb/cmd_onion.go @@ -0,0 +1,149 @@ +// cmd_onion.go - various onion service commands: ADD_ONION, DEL_ONION... 
+// +// To the extent possible under law, David Stainton waived all copyright +// and related or neighboring rights to this module of bulb, using the creative +// commons "cc0" public domain dedication. See LICENSE or +// for full details. + +package bulb + +import ( + "crypto" + "crypto/rsa" + "encoding/base64" + "fmt" + "strings" + + "github.com/yawning/bulb/utils/pkcs1" +) + +// OnionInfo is the result of the AddOnion command. +type OnionInfo struct { + OnionID string + PrivateKey crypto.PrivateKey + + RawResponse *Response +} + +// OnionPrivateKey is a unknown Onion private key (crypto.PublicKey). +type OnionPrivateKey struct { + KeyType string + Key string +} + +// OnionPortSpec is a Onion VirtPort/Target pair. +type OnionPortSpec struct { + VirtPort uint16 + Target string +} + +// AddOnion issues an ADD_ONION command and returns the parsed response. +func (c *Conn) AddOnion(ports []OnionPortSpec, key crypto.PrivateKey, oneshot bool) (*OnionInfo, error) { + const keyTypeRSA = "RSA1024" + var err error + + var portStr string + if ports == nil { + return nil, newProtocolError("invalid port specification") + } + for _, v := range ports { + portStr += fmt.Sprintf(" Port=%d", v.VirtPort) + if v.Target != "" { + portStr += "," + v.Target + } + } + + var hsKeyType, hsKeyStr string + if key != nil { + switch t := key.(type) { + case *rsa.PrivateKey: + rsaPK, _ := key.(*rsa.PrivateKey) + if rsaPK.N.BitLen() != 1024 { + return nil, newProtocolError("invalid RSA key size") + } + pkDER, err := pkcs1.EncodePrivateKeyDER(rsaPK) + if err != nil { + return nil, newProtocolError("failed to serialize RSA key: %v", err) + } + hsKeyType = keyTypeRSA + hsKeyStr = base64.StdEncoding.EncodeToString(pkDER) + case *OnionPrivateKey: + genericPK, _ := key.(*OnionPrivateKey) + hsKeyType = genericPK.KeyType + hsKeyStr = genericPK.Key + default: + return nil, newProtocolError("unsupported private key type: %v", t) + } + } + + var resp *Response + if hsKeyStr == "" { + flags := " 
Flags=DiscardPK" + if !oneshot { + flags = "" + } + resp, err = c.Request("ADD_ONION NEW:BEST%s%s", portStr, flags) + } else { + resp, err = c.Request("ADD_ONION %s:%s%s", hsKeyType, hsKeyStr, portStr) + } + if err != nil { + return nil, err + } + + // Parse out the response. + var serviceID string + var hsPrivateKey crypto.PrivateKey + for _, l := range resp.Data { + const ( + serviceIDPrefix = "ServiceID=" + privateKeyPrefix = "PrivateKey=" + ) + + if strings.HasPrefix(l, serviceIDPrefix) { + serviceID = strings.TrimPrefix(l, serviceIDPrefix) + } else if strings.HasPrefix(l, privateKeyPrefix) { + if oneshot || hsKeyStr != "" { + return nil, newProtocolError("received an unexpected private key") + } + hsKeyStr = strings.TrimPrefix(l, privateKeyPrefix) + splitKey := strings.SplitN(hsKeyStr, ":", 2) + if len(splitKey) != 2 { + return nil, newProtocolError("failed to parse private key type") + } + + switch splitKey[0] { + case keyTypeRSA: + keyBlob, err := base64.StdEncoding.DecodeString(splitKey[1]) + if err != nil { + return nil, newProtocolError("failed to base64 decode RSA key: %v", err) + } + hsPrivateKey, _, err = pkcs1.DecodePrivateKeyDER(keyBlob) + if err != nil { + return nil, newProtocolError("failed to deserialize RSA key: %v", err) + } + default: + hsPrivateKey := new(OnionPrivateKey) + hsPrivateKey.KeyType = splitKey[0] + hsPrivateKey.Key = splitKey[1] + } + } + } + if serviceID == "" { + // This should *NEVER* happen, since the command succeded, and the spec + // guarantees that this will always be present. + return nil, newProtocolError("failed to determine service ID") + } + + oi := new(OnionInfo) + oi.RawResponse = resp + oi.OnionID = serviceID + oi.PrivateKey = hsPrivateKey + + return oi, nil +} + +// DeleteOnion issues a DEL_ONION command and returns the parsed response. 
+func (c *Conn) DeleteOnion(serviceID string) error { + _, err := c.Request("DEL_ONION %s", serviceID) + return err +} diff --git a/vendor/github.com/Yawning/bulb/cmd_protocolinfo.go b/vendor/github.com/Yawning/bulb/cmd_protocolinfo.go new file mode 100644 index 0000000..44685b0 --- /dev/null +++ b/vendor/github.com/Yawning/bulb/cmd_protocolinfo.go @@ -0,0 +1,95 @@ +// cmd_protocolinfo.go - PROTOCOLINFO command. +// +// To the extent possible under law, Yawning Angel waived all copyright +// and related or neighboring rights to bulb, using the creative +// commons "cc0" public domain dedication. See LICENSE or +// for full details. + +package bulb + +import ( + "strconv" + "strings" + + "github.com/yawning/bulb/utils" +) + +// ProtocolInfo is the result of the ProtocolInfo command. +type ProtocolInfo struct { + AuthMethods map[string]bool + CookieFile string + TorVersion string + + RawResponse *Response +} + +// ProtocolInfo issues a PROTOCOLINFO command and returns the parsed response. +func (c *Conn) ProtocolInfo() (*ProtocolInfo, error) { + // In the pre-authentication state, only one PROTOCOLINFO command + // may be issued. Cache the value returned so that subsequent + // calls continue to work. + if !c.isAuthenticated && c.cachedPI != nil { + return c.cachedPI, nil + } + + resp, err := c.Request("PROTOCOLINFO") + if err != nil { + return nil, err + } + + // Parse out the PIVERSION to make sure it speaks something we understand. + if len(resp.Data) < 1 { + return nil, newProtocolError("missing PIVERSION") + } + switch resp.Data[0] { + case "1": + return nil, newProtocolError("invalid PIVERSION: '%s'", resp.Reply) + default: + } + + // Parse out the rest of the lines. + pi := new(ProtocolInfo) + pi.RawResponse = resp + pi.AuthMethods = make(map[string]bool) + for i := 1; i < len(resp.Data); i++ { + splitLine := utils.SplitQuoted(resp.Data[i], '"', ' ') + switch splitLine[0] { + case "AUTH": + // Parse an AuthLine detailing how to authenticate. 
+ if len(splitLine) < 2 { + continue + } + methods := strings.TrimPrefix(splitLine[1], "METHODS=") + if methods == splitLine[1] { + continue + } + for _, meth := range strings.Split(methods, ",") { + pi.AuthMethods[meth] = true + } + + if len(splitLine) < 3 { + continue + } + cookiePath := strings.TrimPrefix(splitLine[2], "COOKIEFILE=") + if cookiePath == splitLine[2] { + continue + } + pi.CookieFile, _ = strconv.Unquote(cookiePath) + case "VERSION": + // Parse a VersionLine detailing the Tor version. + if len(splitLine) < 2 { + continue + } + torVersion := strings.TrimPrefix(splitLine[1], "Tor=") + if torVersion == splitLine[1] { + continue + } + pi.TorVersion, _ = strconv.Unquote(torVersion) + default: // MUST ignore unsupported InfoLines. + } + } + if !c.isAuthenticated { + c.cachedPI = pi + } + return pi, nil +} diff --git a/vendor/github.com/Yawning/bulb/conn.go b/vendor/github.com/Yawning/bulb/conn.go new file mode 100644 index 0000000..a2613a4 --- /dev/null +++ b/vendor/github.com/Yawning/bulb/conn.go @@ -0,0 +1,233 @@ +// conn.go - Controller connection instance. +// +// To the extent possible under law, Yawning Angel waived all copyright +// and related or neighboring rights to bulb, using the creative +// commons "cc0" public domain dedication. See LICENSE or +// for full details. + +// Package bulb is a Go language interface to a Tor control port. +package bulb + +import ( + "errors" + gofmt "fmt" + "io" + "log" + "net" + "net/textproto" + "sync" +) + +const ( + maxEventBacklog = 16 + maxResponseBacklog = 16 +) + +// ErrNoAsyncReader is the error returned when the asynchronous event handling +// is requested, but the helper go routine has not been started. +var ErrNoAsyncReader = errors.New("event requested without an async reader") + +// Conn is a control port connection instance. 
+type Conn struct { + conn *textproto.Conn + isAuthenticated bool + debugLog bool + cachedPI *ProtocolInfo + + asyncReaderLock sync.Mutex + asyncReaderRunning bool + eventChan chan *Response + respChan chan *Response + closeWg sync.WaitGroup + + rdErrLock sync.Mutex + rdErr error +} + +func (c *Conn) setRdErr(err error, force bool) { + c.rdErrLock.Lock() + defer c.rdErrLock.Unlock() + if c.rdErr == nil || force { + c.rdErr = err + } +} + +func (c *Conn) getRdErr() error { + c.rdErrLock.Lock() + defer c.rdErrLock.Unlock() + return c.rdErr +} + +func (c *Conn) isAsyncReaderRunning() bool { + c.asyncReaderLock.Lock() + defer c.asyncReaderLock.Unlock() + return c.asyncReaderRunning +} + +func (c *Conn) asyncReader() { + for { + resp, err := c.ReadResponse() + if err != nil { + c.setRdErr(err, false) + break + } + if resp.IsAsync() { + c.eventChan <- resp + } else { + c.respChan <- resp + } + } + close(c.eventChan) + close(c.respChan) + c.closeWg.Done() + + // In theory, we would lock and set asyncReaderRunning to false here, but + // once it's started, the only way it returns is if there is a catastrophic + // failure, or a graceful shutdown. Changing this will require redoing how + // Close() works. +} + +// Debug enables/disables debug logging of control port chatter. +func (c *Conn) Debug(enable bool) { + c.debugLog = enable +} + +// Close closes the connection. +func (c *Conn) Close() error { + c.asyncReaderLock.Lock() + defer c.asyncReaderLock.Unlock() + + err := c.conn.Close() + if err != nil && c.asyncReaderRunning { + c.closeWg.Wait() + } + c.setRdErr(io.ErrClosedPipe, true) + return err +} + +// StartAsyncReader starts the asynchronous reader go routine that allows +// asynchronous events to be handled. It must not be called simultaniously +// with Read, Request, or ReadResponse or undefined behavior will occur. 
+func (c *Conn) StartAsyncReader() { + c.asyncReaderLock.Lock() + defer c.asyncReaderLock.Unlock() + if c.asyncReaderRunning { + return + } + + // Allocate the channels and kick off the read worker. + c.eventChan = make(chan *Response, maxEventBacklog) + c.respChan = make(chan *Response, maxResponseBacklog) + c.closeWg.Add(1) + go c.asyncReader() + c.asyncReaderRunning = true +} + +// NextEvent returns the next asynchronous event received, blocking if +// neccecary. In order to enable asynchronous event handling, StartAsyncReader +// must be called first. +func (c *Conn) NextEvent() (*Response, error) { + if err := c.getRdErr(); err != nil { + return nil, err + } + if !c.isAsyncReaderRunning() { + return nil, ErrNoAsyncReader + } + + resp, ok := <-c.eventChan + if resp != nil { + return resp, nil + } else if !ok { + return nil, io.ErrClosedPipe + } + panic("BUG: NextEvent() returned a nil response and error") +} + +// Request sends a raw control port request and returns the response. +// If the async. reader is not currently running, events received while waiting +// for the response will be silently dropped. Calling Request simultaniously +// with StartAsyncReader, Read, Write, or ReadResponse will lead to undefined +// behavior. +func (c *Conn) Request(fmt string, args ...interface{}) (*Response, error) { + if err := c.getRdErr(); err != nil { + return nil, err + } + asyncResp := c.isAsyncReaderRunning() + + if c.debugLog { + log.Printf("C: %s", gofmt.Sprintf(fmt, args...)) + } + + id, err := c.conn.Cmd(fmt, args...) + if err != nil { + return nil, err + } + + c.conn.StartResponse(id) + defer c.conn.EndResponse(id) + var resp *Response + if asyncResp { + var ok bool + resp, ok = <-c.respChan + if resp == nil && !ok { + return nil, io.ErrClosedPipe + } + } else { + // Event handing requires the asyncReader() goroutine, try to get a + // response, while silently swallowing events. 
+ for resp == nil || resp.IsAsync() { + resp, err = c.ReadResponse() + if err != nil { + return nil, err + } + } + } + if resp == nil { + panic("BUG: Request() returned a nil response and error") + } + if resp.IsOk() { + return resp, nil + } + return resp, resp.Err +} + +// Read reads directly from the control port connection. Mixing this call +// with Request, ReadResponse, or asynchronous events will lead to undefined +// behavior. +func (c *Conn) Read(p []byte) (int, error) { + return c.conn.R.Read(p) +} + +// Write writes directly from the control port connection. Mixing this call +// with Request will lead to undefined behavior. +func (c *Conn) Write(p []byte) (int, error) { + n, err := c.conn.W.Write(p) + if err == nil { + // If the write succeeds, but the flush fails, n will be incorrect... + return n, c.conn.W.Flush() + } + return n, err +} + +// Dial connects to a given network/address and returns a new Conn for the +// connection. +func Dial(network, addr string) (*Conn, error) { + c, err := net.Dial(network, addr) + if err != nil { + return nil, err + } + return NewConn(c), nil +} + +// NewConn returns a new Conn using c for I/O. +func NewConn(c io.ReadWriteCloser) *Conn { + conn := new(Conn) + conn.conn = textproto.NewConn(c) + return conn +} + +func newProtocolError(fmt string, args ...interface{}) textproto.ProtocolError { + return textproto.ProtocolError(gofmt.Sprintf(fmt, args...)) +} + +var _ io.ReadWriteCloser = (*Conn)(nil) diff --git a/vendor/github.com/Yawning/bulb/dialer.go b/vendor/github.com/Yawning/bulb/dialer.go new file mode 100644 index 0000000..3b3c0cb --- /dev/null +++ b/vendor/github.com/Yawning/bulb/dialer.go @@ -0,0 +1,54 @@ +// dialer.go - Tor backed proxy.Dialer. +// +// To the extent possible under law, Yawning Angel waived all copyright +// and related or neighboring rights to bulb, using the creative +// commons "cc0" public domain dedication. See LICENSE or +// for full details. 
+ +package bulb + +import ( + "strconv" + "strings" + + "golang.org/x/net/proxy" +) + +// Dialer returns a proxy.Dialer for the given Tor instance. +func (c *Conn) Dialer(auth *proxy.Auth) (proxy.Dialer, error) { + const ( + cmdGetInfo = "GETINFO" + socksListeners = "net/listeners/socks" + unixPrefix = "unix:" + ) + + // Query for the SOCKS listeners via a GETINFO request. + resp, err := c.Request("%s %s", cmdGetInfo, socksListeners) + if err != nil { + return nil, err + } + + if len(resp.Data) != 1 { + return nil, newProtocolError("no SOCKS listeners configured") + } + splitResp := strings.Split(resp.Data[0], " ") + if len(splitResp) < 1 { + return nil, newProtocolError("no SOCKS listeners configured") + } + + // The first listener will have a "net/listeners/socks=" prefix, and all + // entries are QuotedStrings. + laddrStr := strings.TrimPrefix(splitResp[0], socksListeners+"=") + if laddrStr == splitResp[0] { + return nil, newProtocolError("failed to parse SOCKS listener") + } + laddrStr, _ = strconv.Unquote(laddrStr) + + // Construct the proxyDialer. + if strings.HasPrefix(laddrStr, unixPrefix) { + unixPath := strings.TrimPrefix(laddrStr, unixPrefix) + return proxy.SOCKS5("unix", unixPath, auth, proxy.Direct) + } + + return proxy.SOCKS5("tcp", laddrStr, auth, proxy.Direct) +} diff --git a/vendor/github.com/Yawning/bulb/listener.go b/vendor/github.com/Yawning/bulb/listener.go new file mode 100644 index 0000000..b7ddfa8 --- /dev/null +++ b/vendor/github.com/Yawning/bulb/listener.go @@ -0,0 +1,87 @@ +// listener.go - Tor backed net.Listener. +// +// To the extent possible under law, Yawning Angel waived all copyright +// and related or neighboring rights to bulb, using the creative +// commons "cc0" public domain dedication. See LICENSE or +// for full details. 
+ +package bulb + +import ( + "crypto" + "fmt" + "net" + "strconv" +) + +type onionAddr struct { + info *OnionInfo + port uint16 +} + +func (a *onionAddr) Network() string { + return "tcp" +} + +func (a *onionAddr) String() string { + return fmt.Sprintf("%s.onion:%d", a.info.OnionID, a.port) +} + +type onionListener struct { + addr *onionAddr + ctrlConn *Conn + listener net.Listener +} + +func (l *onionListener) Accept() (net.Conn, error) { + return l.listener.Accept() +} + +func (l *onionListener) Close() (err error) { + if err = l.listener.Close(); err == nil { + // Only delete the onion once. + err = l.ctrlConn.DeleteOnion(l.addr.info.OnionID) + } + return err +} + +func (l *onionListener) Addr() net.Addr { + return l.addr +} + +// Listener returns a net.Listener backed by a Onion Service, optionally +// having Tor generate an ephemeral private key. Regardless of the status of +// the returned Listener, the Onion Service will be torn down when the control +// connection is closed. +// +// WARNING: Only one port can be listened to per PrivateKey if this interface +// is used. To bind to more ports, use the AddOnion call directly. +func (c *Conn) Listener(port uint16, key crypto.PrivateKey) (net.Listener, error) { + const ( + loopbackAddr = "127.0.0.1:0" + ) + + // Listen on the loopback interface. + tcpListener, err := net.Listen("tcp4", loopbackAddr) + if err != nil { + return nil, err + } + tAddr, ok := tcpListener.Addr().(*net.TCPAddr) + if !ok { + tcpListener.Close() + return nil, newProtocolError("failed to extract local port") + } + + // Create the onion. 
+ ports := []OnionPortSpec{{port, strconv.FormatUint((uint64)(tAddr.Port), 10)}} + oi, err := c.AddOnion(ports, key, key == nil) + if err != nil { + tcpListener.Close() + return nil, err + } + + oa := &onionAddr{info: oi, port: port} + ol := &onionListener{addr: oa, ctrlConn: c, listener: tcpListener} + + return ol, nil +} diff --git a/vendor/github.com/Yawning/bulb/response.go b/vendor/github.com/Yawning/bulb/response.go new file mode 100644 index 0000000..8fb2dc8 --- /dev/null +++ b/vendor/github.com/Yawning/bulb/response.go @@ -0,0 +1,125 @@ +// response.go - Generic response handler +// +// To the extent possible under law, Yawning Angel waived all copyright +// and related or neighboring rights to bulb, using the creative +// commons "cc0" public domain dedication. See LICENSE or +// for full details. + +package bulb + +import ( + "log" + "net/textproto" + "strconv" + "strings" +) + +// Response is a response to a control port command, or an asynchronous event. +type Response struct { + // Err is the status code and string representation associated with a + // response. Responses that have completed successfully will also have + // Err set to indicate such. + Err *textproto.Error + + // Reply is the text on the EndReplyLine of the response. + Reply string + + // Data is the MidReplyLines/DataReplyLines of the response. Dot encoded + // data is "decoded" and presented as a single string (terminal ".CRLF" + // removed, all intervening CRs stripped). + Data []string + + // RawLines is all of the lines of a response, without CRLFs. + RawLines []string +} + +// IsOk returns true if the response status code indicates success or +// an asynchronous event. +func (r *Response) IsOk() bool { + switch r.Err.Code { + case StatusOk, StatusOkUnneccecary, StatusAsyncEvent: + return true + default: + return false + } +} + +// IsAsync returns true if the response is an asynchronous event. 
+func (r *Response) IsAsync() bool { + return r.Err.Code == StatusAsyncEvent +} + +// ReadResponse returns the next response object. Calling this +// simultaniously with Read, Request, or StartAsyncReader will lead to +// undefined behavior +func (c *Conn) ReadResponse() (*Response, error) { + var resp *Response + var statusCode int + for { + line, err := c.conn.ReadLine() + if err != nil { + return nil, err + } + if c.debugLog { + log.Printf("S: %s", line) + } + + // Parse the line that was just read. + if len(line) < 4 { + return nil, newProtocolError("truncated response: '%s'", line) + } + if code, err := strconv.Atoi(line[0:3]); err != nil { + return nil, newProtocolError("invalid status code: '%s'", line[0:3]) + } else if code < 100 { + return nil, newProtocolError("invalid status code: '%s'", line[0:3]) + } else if resp == nil { + resp = new(Response) + statusCode = code + } else if code != statusCode { + // The status code should stay fixed for all lines of the + // response, since events can't be interleaved with response + // lines. + return nil, newProtocolError("status code changed: %03d != %03d", code, statusCode) + } + if resp.RawLines == nil { + resp.RawLines = make([]string, 0, 1) + } + + if line[3] == ' ' { + // Final line in the response. + resp.Reply = line[4:] + resp.Err = statusCodeToError(statusCode, resp.Reply) + resp.RawLines = append(resp.RawLines, line) + return resp, nil + } + + if resp.Data == nil { + resp.Data = make([]string, 0, 1) + } + switch line[3] { + case '-': + // Continuation, keep reading. + resp.Data = append(resp.Data, line[4:]) + resp.RawLines = append(resp.RawLines, line) + case '+': + // A "dot-encoded" payload follows. 
+ resp.Data = append(resp.Data, line[4:]) + resp.RawLines = append(resp.RawLines, line) + dotBody, err := c.conn.ReadDotBytes() + if err != nil { + return nil, err + } + if c.debugLog { + log.Printf("S: [dot encoded data]") + } + resp.Data = append(resp.Data, string(dotBody)) + dotLines := strings.Split(string(dotBody), "\n") + for _, dotLine := range dotLines[:len(dotLines)-1] { + resp.RawLines = append(resp.RawLines, dotLine) + } + resp.RawLines = append(resp.RawLines, ".") + default: + return nil, newProtocolError("invalid separator: '%c'", line[3]) + } + } +} diff --git a/vendor/github.com/Yawning/bulb/status.go b/vendor/github.com/Yawning/bulb/status.go new file mode 100644 index 0000000..c0971c5 --- /dev/null +++ b/vendor/github.com/Yawning/bulb/status.go @@ -0,0 +1,71 @@ +// status.go - Status codes. +// +// To the extent possible under law, Yawning Angel waived all copyright +// and related or neighboring rights to bulb, using the creative +// commons "cc0" public domain dedication. See LICENSE or +// for full details. + +package bulb + +import ( + "fmt" + "strings" + "net/textproto" +) + +// The various control port StatusCode constants. 
+const ( + StatusOk = 250 + StatusOkUnneccecary = 251 + + StatusErrResourceExhausted = 451 + StatusErrSyntaxError = 500 + StatusErrUnrecognizedCmd = 510 + StatusErrUnimplementedCmd = 511 + StatusErrSyntaxErrorArg = 512 + StatusErrUnrecognizedCmdArg = 513 + StatusErrAuthenticationRequired = 514 + StatusErrBadAuthentication = 515 + StatusErrUnspecifiedTorError = 550 + StatusErrInternalError = 551 + StatusErrUnrecognizedEntity = 552 + StatusErrInvalidConfigValue = 553 + StatusErrInvalidDescriptor = 554 + StatusErrUnmanagedEntity = 555 + + StatusAsyncEvent = 650 +) + +var statusCodeStringMap = map[int]string{ + StatusOk: "OK", + StatusOkUnneccecary: "Operation was unnecessary", + + StatusErrResourceExhausted: "Resource exhausted", + StatusErrSyntaxError: "Syntax error: protocol", + StatusErrUnrecognizedCmd: "Unrecognized command", + StatusErrUnimplementedCmd: "Unimplemented command", + StatusErrSyntaxErrorArg: "Syntax error in command argument", + StatusErrUnrecognizedCmdArg: "Unrecognized command argument", + StatusErrAuthenticationRequired: "Authentication required", + StatusErrBadAuthentication: "Bad authentication", + StatusErrUnspecifiedTorError: "Unspecified Tor error", + StatusErrInternalError: "Internal error", + StatusErrUnrecognizedEntity: "Unrecognized entity", + StatusErrInvalidConfigValue: "Invalid configuration value", + StatusErrInvalidDescriptor: "Invalid descriptor", + StatusErrUnmanagedEntity: "Unmanaged entity", + + StatusAsyncEvent: "Asynchronous event notification", +} + +func statusCodeToError(code int, reply string) *textproto.Error { + err := new(textproto.Error) + err.Code = code + if msg, ok := statusCodeStringMap[code]; ok { + trimmedReply := strings.TrimSpace(strings.TrimPrefix(reply, msg)) + err.Msg = fmt.Sprintf("%s: %s", msg, trimmedReply) + } else { + err.Msg = fmt.Sprintf("Unknown status code (%03d): %s", code, reply) + } + return err +} diff --git a/vendor/github.com/cenk/backoff/backoff.go b/vendor/github.com/cenk/backoff/backoff.go 
new file mode 100644 index 0000000..2102c5f --- /dev/null +++ b/vendor/github.com/cenk/backoff/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff.Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. +type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. 
+// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenk/backoff/exponential.go b/vendor/github.com/cenk/backoff/exponential.go new file mode 100644 index 0000000..ae65516 --- /dev/null +++ b/vendor/github.com/cenk/backoff/exponential.go @@ -0,0 +1,156 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. + +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). 
+ +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. +func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + } + if b.RandomizationFactor < 0 { + b.RandomizationFactor = 0 + } else if b.RandomizationFactor > 1 { + b.RandomizationFactor = 1 + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). 
+var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. + if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [currentInterval - randomizationFactor*currentInterval, currentInterval + randomizationFactor*currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. 
+ // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenk/backoff/retry.go b/vendor/github.com/cenk/backoff/retry.go new file mode 100644 index 0000000..6bc88ce --- /dev/null +++ b/vendor/github.com/cenk/backoff/retry.go @@ -0,0 +1,46 @@ +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// It is the caller's responsibility to reset b after Retry returns. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. +func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. 
+func RetryNotify(operation Operation, b BackOff, notify Notify) error { + var err error + var next time.Duration + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if next = b.NextBackOff(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + time.Sleep(next) + } +} diff --git a/vendor/github.com/cenk/backoff/ticker.go b/vendor/github.com/cenk/backoff/ticker.go new file mode 100644 index 0000000..7a5ff4e --- /dev/null +++ b/vendor/github.com/cenk/backoff/ticker.go @@ -0,0 +1,79 @@ +package backoff + +import ( + "runtime" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send the time at times +// specified by the BackOff argument. Ticker is guaranteed to tick at least once. +// The channel is closed when Stop method is called or BackOff stops. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + stop: make(chan struct{}), + } + go t.run() + runtime.SetFinalizer(t, (*Ticker).Stop) + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + t.b.Reset() + + // Ticker is guaranteed to tick at least once. + afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. 
+ return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + return time.After(next) +} diff --git a/vendor/github.com/cenkalti/backoff/backoff.go b/vendor/github.com/cenkalti/backoff/backoff.go new file mode 100644 index 0000000..2102c5f --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/backoff.go @@ -0,0 +1,66 @@ +// Package backoff implements backoff algorithms for retrying operations. +// +// Use Retry function for retrying operations that may fail. +// If Retry does not meet your needs, +// copy/paste the function into your project and modify as you wish. +// +// There is also Ticker type similar to time.Ticker. +// You can use it if you need to work with channels. +// +// See Examples section below for usage examples. +package backoff + +import "time" + +// BackOff is a backoff policy for retrying an operation. +type BackOff interface { + // NextBackOff returns the duration to wait before retrying the operation, + // or backoff.Stop to indicate that no more retries should be made. + // + // Example usage: + // + // duration := backoff.NextBackOff(); + // if (duration == backoff.Stop) { + // // Do not retry operation. + // } else { + // // Sleep for duration and retry operation. + // } + // + NextBackOff() time.Duration + + // Reset to initial state. + Reset() +} + +// Stop indicates that no more retries should be made for use in NextBackOff(). +const Stop time.Duration = -1 + +// ZeroBackOff is a fixed backoff policy whose backoff time is always zero, +// meaning that the operation is retried immediately without waiting, indefinitely. 
+type ZeroBackOff struct{} + +func (b *ZeroBackOff) Reset() {} + +func (b *ZeroBackOff) NextBackOff() time.Duration { return 0 } + +// StopBackOff is a fixed backoff policy that always returns backoff.Stop for +// NextBackOff(), meaning that the operation should never be retried. +type StopBackOff struct{} + +func (b *StopBackOff) Reset() {} + +func (b *StopBackOff) NextBackOff() time.Duration { return Stop } + +// ConstantBackOff is a backoff policy that always returns the same backoff delay. +// This is in contrast to an exponential backoff policy, +// which returns a delay that grows longer as you call NextBackOff() over and over again. +type ConstantBackOff struct { + Interval time.Duration +} + +func (b *ConstantBackOff) Reset() {} +func (b *ConstantBackOff) NextBackOff() time.Duration { return b.Interval } + +func NewConstantBackOff(d time.Duration) *ConstantBackOff { + return &ConstantBackOff{Interval: d} +} diff --git a/vendor/github.com/cenkalti/backoff/exponential.go b/vendor/github.com/cenkalti/backoff/exponential.go new file mode 100644 index 0000000..ae65516 --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/exponential.go @@ -0,0 +1,156 @@ +package backoff + +import ( + "math/rand" + "time" +) + +/* +ExponentialBackOff is a backoff implementation that increases the backoff +period for each retry attempt using a randomization function that grows exponentially. + +NextBackOff() is calculated using the following formula: + + randomized interval = + RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor]) + +In other words NextBackOff() will range between the randomization factor +percentage below and above the retry interval. + +For example, given the following parameters: + + RetryInterval = 2 + RandomizationFactor = 0.5 + Multiplier = 2 + +the actual backoff period used in the next retry attempt will range between 1 and 3 seconds, +multiplied by the exponential, that is, between 2 and 6 seconds. 
+ +Note: MaxInterval caps the RetryInterval and not the randomized interval. + +If the time elapsed since an ExponentialBackOff instance is created goes past the +MaxElapsedTime, then the method NextBackOff() starts returning backoff.Stop. + +The elapsed time can be reset by calling Reset(). + +Example: Given the following default arguments, for 10 tries the sequence will be, +and assuming we go over the MaxElapsedTime on the 10th try: + + Request # RetryInterval (seconds) Randomized Interval (seconds) + + 1 0.5 [0.25, 0.75] + 2 0.75 [0.375, 1.125] + 3 1.125 [0.562, 1.687] + 4 1.687 [0.8435, 2.53] + 5 2.53 [1.265, 3.795] + 6 3.795 [1.897, 5.692] + 7 5.692 [2.846, 8.538] + 8 8.538 [4.269, 12.807] + 9 12.807 [6.403, 19.210] + 10 19.210 backoff.Stop + +Note: Implementation is not thread-safe. +*/ +type ExponentialBackOff struct { + InitialInterval time.Duration + RandomizationFactor float64 + Multiplier float64 + MaxInterval time.Duration + // After MaxElapsedTime the ExponentialBackOff stops. + // It never stops if MaxElapsedTime == 0. + MaxElapsedTime time.Duration + Clock Clock + + currentInterval time.Duration + startTime time.Time +} + +// Clock is an interface that returns current time for BackOff. +type Clock interface { + Now() time.Time +} + +// Default values for ExponentialBackOff. +const ( + DefaultInitialInterval = 500 * time.Millisecond + DefaultRandomizationFactor = 0.5 + DefaultMultiplier = 1.5 + DefaultMaxInterval = 60 * time.Second + DefaultMaxElapsedTime = 15 * time.Minute +) + +// NewExponentialBackOff creates an instance of ExponentialBackOff using default values. 
+func NewExponentialBackOff() *ExponentialBackOff { + b := &ExponentialBackOff{ + InitialInterval: DefaultInitialInterval, + RandomizationFactor: DefaultRandomizationFactor, + Multiplier: DefaultMultiplier, + MaxInterval: DefaultMaxInterval, + MaxElapsedTime: DefaultMaxElapsedTime, + Clock: SystemClock, + } + if b.RandomizationFactor < 0 { + b.RandomizationFactor = 0 + } else if b.RandomizationFactor > 1 { + b.RandomizationFactor = 1 + } + b.Reset() + return b +} + +type systemClock struct{} + +func (t systemClock) Now() time.Time { + return time.Now() +} + +// SystemClock implements Clock interface that uses time.Now(). +var SystemClock = systemClock{} + +// Reset the interval back to the initial retry interval and restarts the timer. +func (b *ExponentialBackOff) Reset() { + b.currentInterval = b.InitialInterval + b.startTime = b.Clock.Now() +} + +// NextBackOff calculates the next backoff interval using the formula: +// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval) +func (b *ExponentialBackOff) NextBackOff() time.Duration { + // Make sure we have not gone over the maximum elapsed time. + if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime { + return Stop + } + defer b.incrementCurrentInterval() + return getRandomValueFromInterval(b.RandomizationFactor, rand.Float64(), b.currentInterval) +} + +// GetElapsedTime returns the elapsed time since an ExponentialBackOff instance +// is created and is reset when Reset() is called. +// +// The elapsed time is computed using time.Now().UnixNano(). +func (b *ExponentialBackOff) GetElapsedTime() time.Duration { + return b.Clock.Now().Sub(b.startTime) +} + +// Increments the current interval by multiplying it with the multiplier. +func (b *ExponentialBackOff) incrementCurrentInterval() { + // Check for overflow, if overflow is detected set the current interval to the max interval. 
+ if float64(b.currentInterval) >= float64(b.MaxInterval)/b.Multiplier { + b.currentInterval = b.MaxInterval + } else { + b.currentInterval = time.Duration(float64(b.currentInterval) * b.Multiplier) + } +} + +// Returns a random value from the following interval: +// [randomizationFactor * currentInterval, randomizationFactor * currentInterval]. +func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration { + var delta = randomizationFactor * float64(currentInterval) + var minInterval = float64(currentInterval) - delta + var maxInterval = float64(currentInterval) + delta + + // Get a random value from the range [minInterval, maxInterval]. + // The formula used below has a +1 because if the minInterval is 1 and the maxInterval is 3 then + // we want a 33% chance for selecting either 1, 2 or 3. + return time.Duration(minInterval + (random * (maxInterval - minInterval + 1))) +} diff --git a/vendor/github.com/cenkalti/backoff/retry.go b/vendor/github.com/cenkalti/backoff/retry.go new file mode 100644 index 0000000..6bc88ce --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/retry.go @@ -0,0 +1,46 @@ +package backoff + +import "time" + +// An Operation is executing by Retry() or RetryNotify(). +// The operation will be retried using a backoff policy if it returns an error. +type Operation func() error + +// Notify is a notify-on-error function. It receives an operation error and +// backoff delay if the operation failed (with an error). +// +// NOTE that if the backoff policy stated to stop retrying, +// the notify function isn't called. +type Notify func(error, time.Duration) + +// Retry the operation o until it does not return error or BackOff stops. +// o is guaranteed to be run at least once. +// It is the caller's responsibility to reset b after Retry returns. +// +// Retry sleeps the goroutine for the duration returned by BackOff after a +// failed operation returns. 
+func Retry(o Operation, b BackOff) error { return RetryNotify(o, b, nil) } + +// RetryNotify calls notify function with the error and wait duration +// for each failed attempt before sleep. +func RetryNotify(operation Operation, b BackOff, notify Notify) error { + var err error + var next time.Duration + + b.Reset() + for { + if err = operation(); err == nil { + return nil + } + + if next = b.NextBackOff(); next == Stop { + return err + } + + if notify != nil { + notify(err, next) + } + + time.Sleep(next) + } +} diff --git a/vendor/github.com/cenkalti/backoff/ticker.go b/vendor/github.com/cenkalti/backoff/ticker.go new file mode 100644 index 0000000..7a5ff4e --- /dev/null +++ b/vendor/github.com/cenkalti/backoff/ticker.go @@ -0,0 +1,79 @@ +package backoff + +import ( + "runtime" + "sync" + "time" +) + +// Ticker holds a channel that delivers `ticks' of a clock at times reported by a BackOff. +// +// Ticks will continue to arrive when the previous operation is still running, +// so operations that take a while to fail could run in quick succession. +type Ticker struct { + C <-chan time.Time + c chan time.Time + b BackOff + stop chan struct{} + stopOnce sync.Once +} + +// NewTicker returns a new Ticker containing a channel that will send the time at times +// specified by the BackOff argument. Ticker is guaranteed to tick at least once. +// The channel is closed when Stop method is called or BackOff stops. +func NewTicker(b BackOff) *Ticker { + c := make(chan time.Time) + t := &Ticker{ + C: c, + c: c, + b: b, + stop: make(chan struct{}), + } + go t.run() + runtime.SetFinalizer(t, (*Ticker).Stop) + return t +} + +// Stop turns off a ticker. After Stop, no more ticks will be sent. +func (t *Ticker) Stop() { + t.stopOnce.Do(func() { close(t.stop) }) +} + +func (t *Ticker) run() { + c := t.c + defer close(c) + t.b.Reset() + + // Ticker is guaranteed to tick at least once. 
+ afterC := t.send(time.Now()) + + for { + if afterC == nil { + return + } + + select { + case tick := <-afterC: + afterC = t.send(tick) + case <-t.stop: + t.c = nil // Prevent future ticks from being sent to the channel. + return + } + } +} + +func (t *Ticker) send(tick time.Time) <-chan time.Time { + select { + case t.c <- tick: + case <-t.stop: + return nil + } + + next := t.b.NextBackOff() + if next == Stop { + t.Stop() + return nil + } + + return time.After(next) +} diff --git a/vendor/github.com/facebookgo/flagenv/flagenv.go b/vendor/github.com/facebookgo/flagenv/flagenv.go new file mode 100644 index 0000000..fe2ea8d --- /dev/null +++ b/vendor/github.com/facebookgo/flagenv/flagenv.go @@ -0,0 +1,67 @@ +// Package flagenv provides the ability to populate flags from +// environment variables. +package flagenv + +import ( + "flag" + "fmt" + "log" + "os" + "strings" +) + +// Specify a prefix for environment variables. +var Prefix = "" + +func contains(list []*flag.Flag, f *flag.Flag) bool { + for _, i := range list { + if i == f { + return true + } + } + return false +} + +// ParseSet parses the given flagset. The specified prefix will be applied to +// the environment variable names. +func ParseSet(prefix string, set *flag.FlagSet) error { + var explicit []*flag.Flag + var all []*flag.Flag + set.Visit(func(f *flag.Flag) { + explicit = append(explicit, f) + }) + + var err error + set.VisitAll(func(f *flag.Flag) { + if err != nil { + return + } + all = append(all, f) + if !contains(explicit, f) { + name := strings.Replace(f.Name, ".", "_", -1) + name = strings.Replace(name, "-", "_", -1) + if prefix != "" { + name = prefix + name + } + name = strings.ToUpper(name) + val := os.Getenv(name) + if val != "" { + if ferr := f.Value.Set(val); ferr != nil { + err = fmt.Errorf("failed to set flag %q with value %q", f.Name, val) + } + } + } + }) + return err +} + +// Parse will set each defined flag from its corresponding environment +// variable . 
If dots or dash are presents in the flag name, they will be +// converted to underscores. +// +// If Parse fails, a fatal error is issued. +func Parse() { + if err := ParseSet(Prefix, flag.CommandLine); err != nil { + log.Fatalln(err) + } +} diff --git a/vendor/github.com/golang/protobuf/proto/clone.go b/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 0000000..e392575 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,229 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. 
+ if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. 
+ out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/golang/protobuf/proto/decode.go b/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 0000000..aa20729 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,970 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. 
+// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. 
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. 
+// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. 
The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. 
+func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. +func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. 
+ ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. 
+ return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. 
+func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). +func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. 
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. 
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. + for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? 
+ return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). 
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/golang/protobuf/proto/encode.go b/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 0000000..2b30f84 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,1362 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. 
+ errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. 
+func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. 
+func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. 
+ if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) 
+ p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. 
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). 
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) 
+ return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). 
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) 
+ err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. 
+func (o *Buffer) enc_map(p *Properties, base structPointer) error { + exts := structPointer_ExtMap(base, p.field) + if err := encodeExtensionsMap(*exts); err != nil { + return err + } + + return o.enc_map_body(*exts) +} + +func (o *Buffer) enc_exts(p *Properties, base structPointer) error { + exts := structPointer_Extensions(base, p.field) + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { + return err + } + + return o.enc_map_body(v) +} + +func (o *Buffer) enc_map_body(v map[int32]Extension) error { + // Fast-path for common cases: zero or one extensions. + if len(v) <= 1 { + for _, e := range v { + o.buf = append(o.buf, e.enc...) + } + return nil + } + + // Sort keys to provide a deterministic encoding. + keys := make([]int, 0, len(v)) + for k := range v { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + o.buf = append(o.buf, v[int32(k)].enc...) + } + return nil +} + +func size_map(p *Properties, base structPointer) int { + v := structPointer_ExtMap(base, p.field) + return extensionsMapSize(*v) +} + +func size_exts(p *Properties, base structPointer) int { + v := structPointer_Extensions(base, p.field) + return extensionsSize(v) +} + +// Encode a map field. +func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { + var state errorState // XXX: or do we need to plumb this through? 
+ + /* + A map defined as + map map_field = N; + is encoded in the same way as + message MapFieldEntry { + key_type key = 1; + value_type value = 2; + } + repeated MapFieldEntry map_field = N; + */ + + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + if v.Len() == 0 { + return nil + } + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + enc := func() error { + if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { + return err + } + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { + return err + } + return nil + } + + // Don't sort map keys. It is not required by the spec, and C++ doesn't do it. + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + + keycopy.Set(key) + valcopy.Set(val) + + o.buf = append(o.buf, p.tagcode...) + if err := o.enc_len_thing(enc, &state); err != nil { + return err + } + } + return nil +} + +func size_new_map(p *Properties, base structPointer) int { + v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V + + keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype) + + n := 0 + for _, key := range v.MapKeys() { + val := v.MapIndex(key) + keycopy.Set(key) + valcopy.Set(val) + + // Tag codes for key and val are the responsibility of the sub-sizer. + keysize := p.mkeyprop.size(p.mkeyprop, keybase) + valsize := p.mvalprop.size(p.mvalprop, valbase) + entry := keysize + valsize + // Add on tag code and length of map entry itself. + n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry + } + return n +} + +// mapEncodeScratch returns a new reflect.Value matching the map's value type, +// and a structPointer suitable for passing to an encoder or sizer. +func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) { + // Prepare addressable doubly-indirect placeholders for the key and value types. 
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T. + + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. 
+ if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) 
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/equal.go b/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 0000000..2ed1cf5 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). 
+ - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. + +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. 
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. 
+ n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. + if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. 
+func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/golang/protobuf/proto/extensions.go b/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 0000000..eaad218 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,587 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. 
+// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. 
+// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. 
+type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. 
+ if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. +func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, ok := extendable(pb) + if !ok { + return + } + // TODO: Check types, field numbers, etc.? 
+ extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. 
+ return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. 
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. +func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. 
Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/golang/protobuf/proto/lib.go b/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 0000000..ac4ddbc --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,898 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. 
+ - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. 
+ - Getters are only generated for message and oneof fields. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string 
`protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest 
contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. 
+func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. 
+func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. 
+// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. + switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + 
*(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. 
+// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. +func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != 
nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const ProtoPackageIsVersion1 = true diff --git a/vendor/github.com/golang/protobuf/proto/message_set.go b/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 0000000..fd982de --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,311 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. 
+var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? 
+} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. 
+ msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. 
+func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. 
+func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000..fb512e2 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. 
+func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return structPointer_ifield(p, f).(*[]byte) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return structPointer_ifield(p, f).(*[][]byte) +} + +// Bool returns the address of a *bool field in the struct. +func structPointer_Bool(p structPointer, f field) **bool { + return structPointer_ifield(p, f).(**bool) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return structPointer_ifield(p, f).(*bool) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return structPointer_ifield(p, f).(*[]bool) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return structPointer_ifield(p, f).(**string) +} + +// StringVal returns the address of a string field in the struct. 
+func structPointer_StringVal(p structPointer, f field) *string { + return structPointer_ifield(p, f).(*string) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return structPointer_ifield(p, f).(*[]string) +} + +// Extensions returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return structPointer_ifield(p, f).(*XXX_InternalExtensions) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return structPointer_ifield(p, f).(*map[int32]Extension) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return structPointer_field(p, f).Addr() +} + +// SetStructPointer writes a *struct field in the struct. +func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + structPointer_field(p, f).Set(q.v) +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return structPointer{structPointer_field(p, f)} +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice { + return structPointerSlice{structPointer_field(p, f)} +} + +// A structPointerSlice represents the address of a slice of pointers to structs +// (themselves messages or groups). That is, v.Type() is *[]*struct{...}. 
+type structPointerSlice struct { + v reflect.Value +} + +func (p structPointerSlice) Len() int { return p.v.Len() } +func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} } +func (p structPointerSlice) Append(q structPointer) { + p.v.Set(reflect.Append(p.v, q.v)) +} + +var ( + int32Type = reflect.TypeOf(int32(0)) + uint32Type = reflect.TypeOf(uint32(0)) + float32Type = reflect.TypeOf(float32(0)) + int64Type = reflect.TypeOf(int64(0)) + uint64Type = reflect.TypeOf(uint64(0)) + float64Type = reflect.TypeOf(float64(0)) +) + +// A word32 represents a field of type *int32, *uint32, *float32, or *enum. +// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable. +type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. 
+func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. 
+type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
+type word64Val struct { + v reflect.Value +} + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + switch p.v.Type() { + case int64Type: + p.v.SetInt(int64(x)) + return + case uint64Type: + p.v.SetUint(x) + return + case float64Type: + p.v.SetFloat(math.Float64frombits(x)) + return + } + panic("unreachable") +} + +func word64Val_Get(p word64Val) uint64 { + elem := p.v + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val{structPointer_field(p, f)} +} + +type word64Slice struct { + v reflect.Value +} + +func (p word64Slice) Append(x uint64) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int64: + elem.SetInt(int64(int64(x))) + case reflect.Uint64: + elem.SetUint(uint64(x)) + case reflect.Float64: + elem.SetFloat(float64(math.Float64frombits(x))) + } +} + +func (p word64Slice) Len() int { + return p.v.Len() +} + +func (p word64Slice) Index(i int) uint64 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return uint64(elem.Uint()) + case reflect.Float64: + return math.Float64bits(float64(elem.Float())) + } + panic("unreachable") +} + +func structPointer_Word64Slice(p structPointer, f field) word64Slice { + return word64Slice{structPointer_field(p, f)} +} diff --git a/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000..6b5567d --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange 
format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "unsafe" +) + +// NOTE: These type_Foo functions would more idiomatically be methods, +// but Go does not allow methods on pointer types, and we must preserve +// some pointer type for the garbage collector. 
We use these +// funcs with clunky names as our poor approximation to methods. +// +// An alternative would be +// type structPointer struct { p unsafe.Pointer } +// but that does not registerize as well. + +// A structPointer is a pointer to a struct. +type structPointer unsafe.Pointer + +// toStructPointer returns a structPointer equivalent to the given reflect value. +func toStructPointer(v reflect.Value) structPointer { + return structPointer(unsafe.Pointer(v.Pointer())) +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p == nil +} + +// Interface returns the struct pointer, assumed to have element type t, +// as an interface value. +func structPointer_Interface(p structPointer, t reflect.Type) interface{} { + return reflect.NewAt(t, unsafe.Pointer(p)).Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != ^field(0) +} + +// Bytes returns the address of a []byte field in the struct. +func structPointer_Bytes(p structPointer, f field) *[]byte { + return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BytesSlice returns the address of a [][]byte field in the struct. +func structPointer_BytesSlice(p structPointer, f field) *[][]byte { + return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// Bool returns the address of a *bool field in the struct. 
+func structPointer_Bool(p structPointer, f field) **bool { + return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolVal returns the address of a bool field in the struct. +func structPointer_BoolVal(p structPointer, f field) *bool { + return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// BoolSlice returns the address of a []bool field in the struct. +func structPointer_BoolSlice(p structPointer, f field) *[]bool { + return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// String returns the address of a *string field in the struct. +func structPointer_String(p structPointer, f field) **string { + return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringVal returns the address of a string field in the struct. +func structPointer_StringVal(p structPointer, f field) *string { + return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StringSlice returns the address of a []string field in the struct. +func structPointer_StringSlice(p structPointer, f field) *[]string { + return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// ExtMap returns the address of an extension map field in the struct. +func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension { + return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// NewAt returns the reflect.Value for a pointer to a field in the struct. +func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value { + return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f))) +} + +// SetStructPointer writes a *struct field in the struct. 
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) { + *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q +} + +// GetStructPointer reads a *struct field in the struct. +func structPointer_GetStructPointer(p structPointer, f field) structPointer { + return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// StructPointerSlice the address of a []*struct field in the struct. +func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice { + return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups). +type structPointerSlice []structPointer + +func (v *structPointerSlice) Len() int { return len(*v) } +func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] } +func (v *structPointerSlice) Append(p structPointer) { *v = append(*v, p) } + +// A word32 is the address of a "pointer to 32-bit value" field. +type word32 **uint32 + +// IsNil reports whether *v is nil. +func word32_IsNil(p word32) bool { + return *p == nil +} + +// Set sets *v to point at a newly allocated word set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + *p = &o.uint32s[0] + o.uint32s = o.uint32s[1:] +} + +// Get gets the value pointed at by *v. +func word32_Get(p word32) uint32 { + return **p +} + +// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Val is the address of a 32-bit value field. +type word32Val *uint32 + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + *p = x +} + +// Get gets the value pointed at by p. 
+func word32Val_Get(p word32Val) uint32 { + return *p +} + +// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// A word32Slice is a slice of 32-bit values. +type word32Slice []uint32 + +func (v *word32Slice) Append(x uint32) { *v = append(*v, x) } +func (v *word32Slice) Len() int { return len(*v) } +func (v *word32Slice) Index(i int) uint32 { return (*v)[i] } + +// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) *word32Slice { + return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +// word64 is like word32 but for 64-bit values. +type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. 
+type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/golang/protobuf/proto/properties.go b/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 0000000..ec2289c --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,872 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. 
+type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. 
+type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
+type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field; set for []byte only + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + def_uint64 uint64 + + enc encoder + valEnc valueEncoder // set for bool and numeric types only + field field + tagcode []byte // encoding of EncodeVarint((Tag<<3)|WireType) + tagbuf [8]byte + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + isMarshaler bool + isUnmarshaler bool + + mtype reflect.Type // set for map types only + mkeyprop *Properties // set for map types only + mvalprop *Properties // set for map types only + + size sizer + valSize valueSizer // set for bool and numeric types only + + dec decoder + valDec valueDecoder // set for bool and numeric types only + + // If this is a packable field, this will be the decoder for the packed version of the field. + packedDec decoder +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s = "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + 
p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. +func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + case reflect.Int32: + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + 
case reflect.String: + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = 
(*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + 
p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. 
+func isMarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isMarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isMarshaler") + } + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + // We're checking for (likely) pointer-receiver methods + // so if t is not a pointer, something is very wrong. + // The calls above only invoke isUnmarshaler on pointer types. + if t.Kind() != reflect.Ptr { + panic("proto: misuse of isUnmarshaler") + } + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. 
+ propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. 
+ sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. +func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. 
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. 
+func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/golang/protobuf/proto/text.go b/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 0000000..965876b --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,854 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. 
+type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + 
} + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. 
Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. + keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. 
+ if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. 
+ msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, ok := extendable(pv.Interface()); ok { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +// writeRaw writes an uninterpreted raw message. +func writeRaw(w *textWriter, b []byte) error { + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if err := writeUnknownStruct(w, b); err != nil { + return err + } + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + return nil +} + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. + switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. 
+ if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else if err := tm.writeStruct(w, v); err != nil { + return err + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. 
+ switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = 
fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/golang/protobuf/proto/text_parser.go b/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 0000000..61f83c1 --- /dev/null +++ b/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,895 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) 
bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. 
+ simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. 
+func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... 
> + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. 
+ fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go b/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go new file mode 100644 index 0000000..8627aa5 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/epsilon_greedy.go @@ -0,0 +1,220 @@ +package hostpool + +import ( + "log" + "math/rand" + "time" +) + +type epsilonHostPoolResponse struct { + standardHostPoolResponse + started time.Time + ended time.Time +} + +func (r *epsilonHostPoolResponse) Mark(err error) { + r.Do(func() { + r.ended = time.Now() + doMark(err, r) + }) +} + +type epsilonGreedyHostPool struct { + standardHostPool // TODO - would be nifty if we could embed HostPool and Locker interfaces + epsilon float32 // this is our exploration factor + decayDuration time.Duration + EpsilonValueCalculator // embed the epsilonValueCalculator + timer + quit chan bool +} + +// Construct an Epsilon Greedy HostPool +// +// Epsilon Greedy is an algorithm that allows HostPool not only to track failure state, +// but also to learn about "better" options in terms of speed, and to pick from available hosts +// based on how well they perform. This gives a weighted request rate to better +// performing hosts, while still distributing requests to all hosts (proportionate to their performance). +// The interface is the same as the standard HostPool, but be sure to mark the HostResponse immediately +// after executing the request to the host, as that will stop the implicitly running request timer. 
+// +// A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132 +// +// To compute the weighting scores, we perform a weighted average of recent response times, over the course of +// `decayDuration`. decayDuration may be set to 0 to use the default value of 5 minutes +// We then use the supplied EpsilonValueCalculator to calculate a score from that weighted average response time. +func NewEpsilonGreedy(hosts []string, decayDuration time.Duration, calc EpsilonValueCalculator) HostPool { + + if decayDuration <= 0 { + decayDuration = defaultDecayDuration + } + stdHP := New(hosts).(*standardHostPool) + p := &epsilonGreedyHostPool{ + standardHostPool: *stdHP, + epsilon: float32(initialEpsilon), + decayDuration: decayDuration, + EpsilonValueCalculator: calc, + timer: &realTimer{}, + quit: make(chan bool), + } + + // allocate structures + for _, h := range p.hostList { + h.epsilonCounts = make([]int64, epsilonBuckets) + h.epsilonValues = make([]int64, epsilonBuckets) + } + go p.epsilonGreedyDecay() + return p +} + +func (p *epsilonGreedyHostPool) Close() { + // No need to do p.quit <- true as close(p.quit) does the trick. 
+ close(p.quit) +} + +func (p *epsilonGreedyHostPool) SetEpsilon(newEpsilon float32) { + p.Lock() + defer p.Unlock() + p.epsilon = newEpsilon +} + +func (p *epsilonGreedyHostPool) SetHosts(hosts []string) { + p.Lock() + defer p.Unlock() + p.standardHostPool.setHosts(hosts) + for _, h := range p.hostList { + h.epsilonCounts = make([]int64, epsilonBuckets) + h.epsilonValues = make([]int64, epsilonBuckets) + } +} + +func (p *epsilonGreedyHostPool) epsilonGreedyDecay() { + durationPerBucket := p.decayDuration / epsilonBuckets + ticker := time.NewTicker(durationPerBucket) + for { + select { + case <-p.quit: + ticker.Stop() + return + case <-ticker.C: + p.performEpsilonGreedyDecay() + } + } +} +func (p *epsilonGreedyHostPool) performEpsilonGreedyDecay() { + p.Lock() + for _, h := range p.hostList { + h.epsilonIndex += 1 + h.epsilonIndex = h.epsilonIndex % epsilonBuckets + h.epsilonCounts[h.epsilonIndex] = 0 + h.epsilonValues[h.epsilonIndex] = 0 + } + p.Unlock() +} + +func (p *epsilonGreedyHostPool) Get() HostPoolResponse { + p.Lock() + defer p.Unlock() + host := p.getEpsilonGreedy() + if host == "" { + return nil + } + + started := time.Now() + return &epsilonHostPoolResponse{ + standardHostPoolResponse: standardHostPoolResponse{host: host, pool: p}, + started: started, + } +} + +func (p *epsilonGreedyHostPool) getEpsilonGreedy() string { + var hostToUse *hostEntry + + // this is our exploration phase + if rand.Float32() < p.epsilon { + p.epsilon = p.epsilon * epsilonDecay + if p.epsilon < minEpsilon { + p.epsilon = minEpsilon + } + return p.getRoundRobin() + } + + // calculate values for each host in the 0..1 range (but not ormalized) + var possibleHosts []*hostEntry + now := time.Now() + var sumValues float64 + for _, h := range p.hostList { + if h.canTryHost(now) { + v := h.getWeightedAverageResponseTime() + if v > 0 { + ev := p.CalcValueFromAvgResponseTime(v) + h.epsilonValue = ev + sumValues += ev + possibleHosts = append(possibleHosts, h) + } + } + } + + if 
len(possibleHosts) != 0 { + // now normalize to the 0..1 range to get a percentage + for _, h := range possibleHosts { + h.epsilonPercentage = h.epsilonValue / sumValues + } + + // do a weighted random choice among hosts + ceiling := 0.0 + pickPercentage := rand.Float64() + for _, h := range possibleHosts { + ceiling += h.epsilonPercentage + if pickPercentage <= ceiling { + hostToUse = h + break + } + } + } + + if hostToUse == nil { + if len(possibleHosts) != 0 { + log.Println("Failed to randomly choose a host, Dan loses") + } + + return p.getRoundRobin() + } + + if hostToUse.dead { + hostToUse.willRetryHost(p.maxRetryInterval) + } + return hostToUse.host +} + +func (p *epsilonGreedyHostPool) markSuccess(hostR HostPoolResponse) { + // first do the base markSuccess - a little redundant with host lookup but cleaner than repeating logic + p.standardHostPool.markSuccess(hostR) + eHostR, ok := hostR.(*epsilonHostPoolResponse) + if !ok { + log.Printf("Incorrect type in eps markSuccess!") // TODO reflection to print out offending type + return + } + host := eHostR.host + duration := p.between(eHostR.started, eHostR.ended) + + p.Lock() + defer p.Unlock() + h, ok := p.hosts[host] + if !ok { + log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) + } + h.epsilonCounts[h.epsilonIndex]++ + h.epsilonValues[h.epsilonIndex] += int64(duration.Seconds() * 1000) +} + +// --- timer: this just exists for testing + +type timer interface { + between(time.Time, time.Time) time.Duration +} + +type realTimer struct{} + +func (rt *realTimer) between(start time.Time, end time.Time) time.Duration { + return end.Sub(start) +} diff --git a/vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go b/vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go new file mode 100644 index 0000000..9bc3102 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/epsilon_value_calculators.go @@ -0,0 +1,40 @@ +package hostpool + +// --- Value Calculators ----------------- + 
+import ( + "math" +) + +// --- Definitions ----------------------- + +// Structs implementing this interface are used to convert the average response time for a host +// into a score that can be used to weight hosts in the epsilon greedy hostpool. Lower response +// times should yield higher scores (we want to select the faster hosts more often) The default +// LinearEpsilonValueCalculator just uses the reciprocal of the response time. In practice, any +// decreasing function from the positive reals to the positive reals should work. +type EpsilonValueCalculator interface { + CalcValueFromAvgResponseTime(float64) float64 +} + +type LinearEpsilonValueCalculator struct{} +type LogEpsilonValueCalculator struct{ LinearEpsilonValueCalculator } +type PolynomialEpsilonValueCalculator struct { + LinearEpsilonValueCalculator + Exp float64 // the exponent to which we will raise the value to reweight +} + +// -------- Methods ----------------------- + +func (c *LinearEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 { + return 1.0 / v +} + +func (c *LogEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 { + // we need to add 1 to v so that this will be defined on all positive floats + return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Log(v + 1.0)) +} + +func (c *PolynomialEpsilonValueCalculator) CalcValueFromAvgResponseTime(v float64) float64 { + return c.LinearEpsilonValueCalculator.CalcValueFromAvgResponseTime(math.Pow(v, c.Exp)) +} diff --git a/vendor/github.com/hailocab/go-hostpool/host_entry.go b/vendor/github.com/hailocab/go-hostpool/host_entry.go new file mode 100644 index 0000000..dcec9a0 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/host_entry.go @@ -0,0 +1,62 @@ +package hostpool + +import ( + "time" +) + +// --- hostEntry - this is due to get upgraded + +type hostEntry struct { + host string + nextRetry time.Time + retryCount int16 + retryDelay time.Duration + dead bool + epsilonCounts 
[]int64 + epsilonValues []int64 + epsilonIndex int + epsilonValue float64 + epsilonPercentage float64 +} + +func (h *hostEntry) canTryHost(now time.Time) bool { + if !h.dead { + return true + } + if h.nextRetry.Before(now) { + return true + } + return false +} + +func (h *hostEntry) willRetryHost(maxRetryInterval time.Duration) { + h.retryCount += 1 + newDelay := h.retryDelay * 2 + if newDelay < maxRetryInterval { + h.retryDelay = newDelay + } else { + h.retryDelay = maxRetryInterval + } + h.nextRetry = time.Now().Add(h.retryDelay) +} + +func (h *hostEntry) getWeightedAverageResponseTime() float64 { + var value float64 + var lastValue float64 + + // start at 1 so we start with the oldest entry + for i := 1; i <= epsilonBuckets; i += 1 { + pos := (h.epsilonIndex + i) % epsilonBuckets + bucketCount := h.epsilonCounts[pos] + // Changing the line below to what I think it should be to get the weights right + weight := float64(i) / float64(epsilonBuckets) + if bucketCount > 0 { + currentValue := float64(h.epsilonValues[pos]) / float64(bucketCount) + value += currentValue * weight + lastValue = currentValue + } else { + value += lastValue * weight + } + } + return value +} diff --git a/vendor/github.com/hailocab/go-hostpool/hostpool.go b/vendor/github.com/hailocab/go-hostpool/hostpool.go new file mode 100644 index 0000000..702ca92 --- /dev/null +++ b/vendor/github.com/hailocab/go-hostpool/hostpool.go @@ -0,0 +1,243 @@ +// A Go package to intelligently and flexibly pool among multiple hosts from your Go application. +// Host selection can operate in round robin or epsilon greedy mode, and unresponsive hosts are +// avoided. A good overview of Epsilon Greedy is here http://stevehanov.ca/blog/index.php?id=132 +package hostpool + +import ( + "log" + "sync" + "time" +) + +// Returns current version +func Version() string { + return "0.1" +} + +// --- Response interfaces and structs ---- + +// This interface represents the response from HostPool. 
You can retrieve the +// hostname by calling Host(), and after making a request to the host you should +// call Mark with any error encountered, which will inform the HostPool issuing +// the HostPoolResponse of what happened to the request and allow it to update. +type HostPoolResponse interface { + Host() string + Mark(error) + hostPool() HostPool +} + +type standardHostPoolResponse struct { + host string + sync.Once + pool HostPool +} + +// --- HostPool structs and interfaces ---- + +// This is the main HostPool interface. Structs implementing this interface +// allow you to Get a HostPoolResponse (which includes a hostname to use), +// get the list of all Hosts, and use ResetAll to reset state. +type HostPool interface { + Get() HostPoolResponse + // keep the marks separate so we can override independently + markSuccess(HostPoolResponse) + markFailed(HostPoolResponse) + + ResetAll() + // ReturnUnhealthy when called with true will prevent an unhealthy node from + // being returned and will instead return a nil HostPoolResponse. If using + // this feature then you should check the result of Get for nil + ReturnUnhealthy(v bool) + Hosts() []string + SetHosts([]string) + + // Close the hostpool and release all resources. 
+ Close() +} + +type standardHostPool struct { + sync.RWMutex + hosts map[string]*hostEntry + hostList []*hostEntry + returnUnhealthy bool + initialRetryDelay time.Duration + maxRetryInterval time.Duration + nextHostIndex int +} + +// ------ constants ------------------- + +const epsilonBuckets = 120 +const epsilonDecay = 0.90 // decay the exploration rate +const minEpsilon = 0.01 // explore one percent of the time +const initialEpsilon = 0.3 +const defaultDecayDuration = time.Duration(5) * time.Minute + +// Construct a basic HostPool using the hostnames provided +func New(hosts []string) HostPool { + p := &standardHostPool{ + returnUnhealthy: true, + hosts: make(map[string]*hostEntry, len(hosts)), + hostList: make([]*hostEntry, len(hosts)), + initialRetryDelay: time.Duration(30) * time.Second, + maxRetryInterval: time.Duration(900) * time.Second, + } + + for i, h := range hosts { + e := &hostEntry{ + host: h, + retryDelay: p.initialRetryDelay, + } + p.hosts[h] = e + p.hostList[i] = e + } + + return p +} + +func (r *standardHostPoolResponse) Host() string { + return r.host +} + +func (r *standardHostPoolResponse) hostPool() HostPool { + return r.pool +} + +func (r *standardHostPoolResponse) Mark(err error) { + r.Do(func() { + doMark(err, r) + }) +} + +func doMark(err error, r HostPoolResponse) { + if err == nil { + r.hostPool().markSuccess(r) + } else { + r.hostPool().markFailed(r) + } +} + +// return an entry from the HostPool +func (p *standardHostPool) Get() HostPoolResponse { + p.Lock() + defer p.Unlock() + host := p.getRoundRobin() + if host == "" { + return nil + } + + return &standardHostPoolResponse{host: host, pool: p} +} + +func (p *standardHostPool) getRoundRobin() string { + now := time.Now() + hostCount := len(p.hostList) + for i := range p.hostList { + // iterate via sequenece from where we last iterated + currentIndex := (i + p.nextHostIndex) % hostCount + + h := p.hostList[currentIndex] + if !h.dead { + p.nextHostIndex = currentIndex + 1 + return 
h.host + } + if h.nextRetry.Before(now) { + h.willRetryHost(p.maxRetryInterval) + p.nextHostIndex = currentIndex + 1 + return h.host + } + } + + // all hosts are down and returnUnhealhy is false then return no host + if !p.returnUnhealthy { + return "" + } + + // all hosts are down. re-add them + p.doResetAll() + p.nextHostIndex = 0 + return p.hostList[0].host +} + +func (p *standardHostPool) ResetAll() { + p.Lock() + defer p.Unlock() + p.doResetAll() +} + +func (p *standardHostPool) SetHosts(hosts []string) { + p.Lock() + defer p.Unlock() + p.setHosts(hosts) +} + +func (p *standardHostPool) ReturnUnhealthy(v bool) { + p.Lock() + defer p.Unlock() + p.returnUnhealthy = v +} + +func (p *standardHostPool) setHosts(hosts []string) { + p.hosts = make(map[string]*hostEntry, len(hosts)) + p.hostList = make([]*hostEntry, len(hosts)) + + for i, h := range hosts { + e := &hostEntry{ + host: h, + retryDelay: p.initialRetryDelay, + } + p.hosts[h] = e + p.hostList[i] = e + } +} + +// this actually performs the logic to reset, +// and should only be called when the lock has +// already been acquired +func (p *standardHostPool) doResetAll() { + for _, h := range p.hosts { + h.dead = false + } +} + +func (p *standardHostPool) Close() { + for _, h := range p.hosts { + h.dead = true + } +} + +func (p *standardHostPool) markSuccess(hostR HostPoolResponse) { + host := hostR.Host() + p.Lock() + defer p.Unlock() + + h, ok := p.hosts[host] + if !ok { + log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) + } + h.dead = false +} + +func (p *standardHostPool) markFailed(hostR HostPoolResponse) { + host := hostR.Host() + p.Lock() + defer p.Unlock() + h, ok := p.hosts[host] + if !ok { + log.Fatalf("host %s not in HostPool %v", host, p.Hosts()) + } + if !h.dead { + h.dead = true + h.retryCount = 0 + h.retryDelay = p.initialRetryDelay + h.nextRetry = time.Now().Add(h.retryDelay) + } + +} +func (p *standardHostPool) Hosts() []string { + hosts := make([]string, 0, len(p.hosts)) + for host 
:= range p.hosts { + hosts = append(hosts, host) + } + return hosts +} diff --git a/vendor/github.com/hashicorp/yamux/addr.go b/vendor/github.com/hashicorp/yamux/addr.go new file mode 100644 index 0000000..be6ebca --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/addr.go @@ -0,0 +1,60 @@ +package yamux + +import ( + "fmt" + "net" +) + +// hasAddr is used to get the address from the underlying connection +type hasAddr interface { + LocalAddr() net.Addr + RemoteAddr() net.Addr +} + +// yamuxAddr is used when we cannot get the underlying address +type yamuxAddr struct { + Addr string +} + +func (*yamuxAddr) Network() string { + return "yamux" +} + +func (y *yamuxAddr) String() string { + return fmt.Sprintf("yamux:%s", y.Addr) +} + +// Addr is used to get the address of the listener. +func (s *Session) Addr() net.Addr { + return s.LocalAddr() +} + +// LocalAddr is used to get the local address of the +// underlying connection. +func (s *Session) LocalAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"local"} + } + return addr.LocalAddr() +} + +// RemoteAddr is used to get the address of remote end +// of the underlying connection +func (s *Session) RemoteAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"remote"} + } + return addr.RemoteAddr() +} + +// LocalAddr returns the local address +func (s *Stream) LocalAddr() net.Addr { + return s.session.LocalAddr() +} + +// LocalAddr returns the remote address +func (s *Stream) RemoteAddr() net.Addr { + return s.session.RemoteAddr() +} diff --git a/vendor/github.com/hashicorp/yamux/const.go b/vendor/github.com/hashicorp/yamux/const.go new file mode 100644 index 0000000..4f52938 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/const.go @@ -0,0 +1,157 @@ +package yamux + +import ( + "encoding/binary" + "fmt" +) + +var ( + // ErrInvalidVersion means we received a frame with an + // invalid version + ErrInvalidVersion = fmt.Errorf("invalid protocol version") + + 
// ErrInvalidMsgType means we received a frame with an + // invalid message type + ErrInvalidMsgType = fmt.Errorf("invalid msg type") + + // ErrSessionShutdown is used if there is a shutdown during + // an operation + ErrSessionShutdown = fmt.Errorf("session shutdown") + + // ErrStreamsExhausted is returned if we have no more + // stream ids to issue + ErrStreamsExhausted = fmt.Errorf("streams exhausted") + + // ErrDuplicateStream is used if a duplicate stream is + // opened inbound + ErrDuplicateStream = fmt.Errorf("duplicate stream initiated") + + // ErrReceiveWindowExceeded indicates the window was exceeded + ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") + + // ErrTimeout is used when we reach an IO deadline + ErrTimeout = fmt.Errorf("i/o deadline reached") + + // ErrStreamClosed is returned when using a closed stream + ErrStreamClosed = fmt.Errorf("stream closed") + + // ErrUnexpectedFlag is set when we get an unexpected flag + ErrUnexpectedFlag = fmt.Errorf("unexpected flag") + + // ErrRemoteGoAway is used when we get a go away from the other side + ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") + + // ErrConnectionReset is sent if a stream is reset. This can happen + // if the backlog is exceeded, or if there was a remote GoAway. + ErrConnectionReset = fmt.Errorf("connection reset") + + // ErrConnectionWriteTimeout indicates that we hit the "safety valve" + // timeout writing to the underlying stream connection. + ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") + + // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close + ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") +) + +const ( + // protoVersion is the only version we support + protoVersion uint8 = 0 +) + +const ( + // Data is used for data frames. They are followed + // by length bytes worth of payload. + typeData uint8 = iota + + // WindowUpdate is used to change the window of + // a given stream. 
The length indicates the delta + // update to the window. + typeWindowUpdate + + // Ping is sent as a keep-alive or to measure + // the RTT. The StreamID and Length value are echoed + // back in the response. + typePing + + // GoAway is sent to terminate a session. The StreamID + // should be 0 and the length is an error code. + typeGoAway +) + +const ( + // SYN is sent to signal a new stream. May + // be sent with a data payload + flagSYN uint16 = 1 << iota + + // ACK is sent to acknowledge a new stream. May + // be sent with a data payload + flagACK + + // FIN is sent to half-close the given stream. + // May be sent with a data payload. + flagFIN + + // RST is used to hard close a given stream. + flagRST +) + +const ( + // initialStreamWindow is the initial stream window size + initialStreamWindow uint32 = 256 * 1024 +) + +const ( + // goAwayNormal is sent on a normal termination + goAwayNormal uint32 = iota + + // goAwayProtoErr sent on a protocol error + goAwayProtoErr + + // goAwayInternalErr sent on an internal error + goAwayInternalErr +) + +const ( + sizeOfVersion = 1 + sizeOfType = 1 + sizeOfFlags = 2 + sizeOfStreamID = 4 + sizeOfLength = 4 + headerSize = sizeOfVersion + sizeOfType + sizeOfFlags + + sizeOfStreamID + sizeOfLength +) + +type header []byte + +func (h header) Version() uint8 { + return h[0] +} + +func (h header) MsgType() uint8 { + return h[1] +} + +func (h header) Flags() uint16 { + return binary.BigEndian.Uint16(h[2:4]) +} + +func (h header) StreamID() uint32 { + return binary.BigEndian.Uint32(h[4:8]) +} + +func (h header) Length() uint32 { + return binary.BigEndian.Uint32(h[8:12]) +} + +func (h header) String() string { + return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d", + h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length()) +} + +func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) { + h[0] = protoVersion + h[1] = msgType + binary.BigEndian.PutUint16(h[2:4], flags) + 
binary.BigEndian.PutUint32(h[4:8], streamID) + binary.BigEndian.PutUint32(h[8:12], length) +} diff --git a/vendor/github.com/hashicorp/yamux/mux.go b/vendor/github.com/hashicorp/yamux/mux.go new file mode 100644 index 0000000..7abc7c7 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/mux.go @@ -0,0 +1,87 @@ +package yamux + +import ( + "fmt" + "io" + "os" + "time" +) + +// Config is used to tune the Yamux session +type Config struct { + // AcceptBacklog is used to limit how many streams may be + // waiting an accept. + AcceptBacklog int + + // EnableKeepalive is used to do a period keep alive + // messages using a ping. + EnableKeepAlive bool + + // KeepAliveInterval is how often to perform the keep alive + KeepAliveInterval time.Duration + + // ConnectionWriteTimeout is meant to be a "safety valve" timeout after + // we which will suspect a problem with the underlying connection and + // close it. This is only applied to writes, where's there's generally + // an expectation that things will move along quickly. + ConnectionWriteTimeout time.Duration + + // MaxStreamWindowSize is used to control the maximum + // window size that we allow for a stream. 
+ MaxStreamWindowSize uint32 + + // LogOutput is used to control the log destination + LogOutput io.Writer +} + +// DefaultConfig is used to return a default configuration +func DefaultConfig() *Config { + return &Config{ + AcceptBacklog: 256, + EnableKeepAlive: true, + KeepAliveInterval: 30 * time.Second, + ConnectionWriteTimeout: 10 * time.Second, + MaxStreamWindowSize: initialStreamWindow, + LogOutput: os.Stderr, + } +} + +// VerifyConfig is used to verify the sanity of configuration +func VerifyConfig(config *Config) error { + if config.AcceptBacklog <= 0 { + return fmt.Errorf("backlog must be positive") + } + if config.KeepAliveInterval == 0 { + return fmt.Errorf("keep-alive interval must be positive") + } + if config.MaxStreamWindowSize < initialStreamWindow { + return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) + } + return nil +} + +// Server is used to initialize a new server-side connection. +// There must be at most one server-side connection. If a nil config is +// provided, the DefaultConfiguration will be used. +func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, false), nil +} + +// Client is used to initialize a new client-side connection. +// There must be at most one client-side connection. 
+func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, true), nil +} diff --git a/vendor/github.com/hashicorp/yamux/session.go b/vendor/github.com/hashicorp/yamux/session.go new file mode 100644 index 0000000..e179818 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/session.go @@ -0,0 +1,623 @@ +package yamux + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Session is used to wrap a reliable ordered connection and to +// multiplex it into multiple streams. +type Session struct { + // remoteGoAway indicates the remote side does + // not want futher connections. Must be first for alignment. + remoteGoAway int32 + + // localGoAway indicates that we should stop + // accepting futher connections. Must be first for alignment. + localGoAway int32 + + // nextStreamID is the next stream we should + // send. This depends if we are a client/server. + nextStreamID uint32 + + // config holds our configuration + config *Config + + // logger is used for our logs + logger *log.Logger + + // conn is the underlying connection + conn io.ReadWriteCloser + + // bufRead is a buffered reader + bufRead *bufio.Reader + + // pings is used to track inflight pings + pings map[uint32]chan struct{} + pingID uint32 + pingLock sync.Mutex + + // streams maps a stream id to a stream, and inflight has an entry + // for any outgoing stream that has not yet been established. Both are + // protected by streamLock. + streams map[uint32]*Stream + inflight map[uint32]struct{} + streamLock sync.Mutex + + // synCh acts like a semaphore. It is sized to the AcceptBacklog which + // is assumed to be symmetric between the client and server. This allows + // the client to avoid exceeding the backlog and instead blocks the open. 
+ synCh chan struct{} + + // acceptCh is used to pass ready streams to the client + acceptCh chan *Stream + + // sendCh is used to mark a stream as ready to send, + // or to send a header out directly. + sendCh chan sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + Body io.Reader + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + s := &Session{ + config: config, + logger: log.New(config.LogOutput, "", log.LstdFlags), + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan sendReady, 64), + recvDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// NumStreams returns the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, 
nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: aborted stream open without inflight syn semaphore") + } + return nil, err + } + return stream, nil +} + +// Accept is used to block until the next available stream +// is ready to be accepted. +func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. +func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. +// Attempts to send a GoAway before closing the connection. 
+func (s *Session) Close() error { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shutdown { + return nil + } + s.shutdown = true + if s.shutdownErr == nil { + s.shutdownErr = ErrSessionShutdown + } + close(s.shutdownCh) + s.conn.Close() + <-s.recvDoneCh + + s.streamLock.Lock() + defer s.streamLock.Unlock() + for _, stream := range s.streams { + stream.forceClose() + } + return nil +} + +// exitErr is used to handle an error that is causing the +// session to terminate. +func (s *Session) exitErr(err error) { + s.shutdownLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = err + } + s.shutdownLock.Unlock() + s.Close() +} + +// GoAway can be used to prevent accepting further +// connections. It does not close the underlying conn. +func (s *Session) GoAway() error { + return s.waitForSend(s.goAway(goAwayNormal), nil) +} + +// goAway is used to send a goAway message +func (s *Session) goAway(reason uint32) header { + atomic.SwapInt32(&s.localGoAway, 1) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeGoAway, 0, 0, reason) + return hdr +} + +// Ping is used to measure the RTT response time +func (s *Session) Ping() (time.Duration, error) { + // Get a channel for the ping + ch := make(chan struct{}) + + // Get a new ping id, mark as pending + s.pingLock.Lock() + id := s.pingID + s.pingID++ + s.pings[id] = ch + s.pingLock.Unlock() + + // Send the ping request + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagSYN, 0, id) + if err := s.waitForSend(hdr, nil); err != nil { + return 0, err + } + + // Wait for a response + start := time.Now() + select { + case <-ch: + case <-time.After(s.config.ConnectionWriteTimeout): + s.pingLock.Lock() + delete(s.pings, id) // Ignore it if a response comes later. 
+ s.pingLock.Unlock() + return 0, ErrTimeout + case <-s.shutdownCh: + return 0, ErrSessionShutdown + } + + // Compute the RTT + return time.Now().Sub(start), nil +} + +// keepalive is a long running goroutine that periodically does +// a ping to keep the connection alive. +func (s *Session) keepalive() { + for { + select { + case <-time.After(s.config.KeepAliveInterval): + _, err := s.Ping() + if err != nil { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + return + } + case <-s.shutdownCh: + return + } + } +} + +// waitForSendErr waits to send a header, checking for a potential shutdown +func (s *Session) waitForSend(hdr header, body io.Reader) error { + errCh := make(chan error, 1) + return s.waitForSendErr(hdr, body, errCh) +} + +// waitForSendErr waits to send a header with optional data, checking for a +// potential shutdown. Since there's the expectation that sends can happen +// in a timely manner, we enforce the connection write timeout here. +func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { + timer := time.NewTimer(s.config.ConnectionWriteTimeout) + defer timer.Stop() + + ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. 
+func (s *Session) sendNoWait(hdr header) error { + timer := time.NewTimer(s.config.ConnectionWriteTimeout) + defer timer.Stop() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[ERR] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + var handler func(header) error + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + // Switch on the type + switch hdr.MsgType() { + case typeData: + 
handler = s.handleStreamMessage + case typeWindowUpdate: + handler = s.handleStreamMessage + case typeGoAway: + handler = s.handleGoAway + case typePing: + handler = s.handlePing + default: + return ErrInvalidMsgType + } + + // Invoke the handler + if err := handler(hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST + if stream == nil { + // Drain any data on the wire + if hdr.MsgType() == typeData && hdr.Length() > 0 { + s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) + if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { + s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) + return nil + } + } else { + s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) + } + return nil + } + + // Check if this is a window update + if hdr.MsgType() == typeWindowUpdate { + if err := stream.incrSendWindow(hdr, flags); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil + } + + // Read the new data + if err := stream.readData(hdr, flags, s.bufRead); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil +} + +// handlePing is invokde for a typePing frame +func (s *Session) handlePing(hdr header) error { + flags := hdr.Flags() + pingID := hdr.Length() + + // Check if this is a query, 
respond back in a separate context so we + // don't interfere with the receiving thread blocking for the write. + if flags&flagSYN == flagSYN { + go func() { + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagACK, 0, pingID) + if err := s.sendNoWait(hdr); err != nil { + s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err) + } + }() + return nil + } + + // Handle a response + s.pingLock.Lock() + ch := s.pings[pingID] + if ch != nil { + delete(s.pings, pingID) + close(ch) + } + s.pingLock.Unlock() + return nil +} + +// handleGoAway is invokde for a typeGoAway frame +func (s *Session) handleGoAway(hdr header) error { + code := hdr.Length() + switch code { + case goAwayNormal: + atomic.SwapInt32(&s.remoteGoAway, 1) + case goAwayProtoErr: + s.logger.Printf("[ERR] yamux: received protocol error go away") + return fmt.Errorf("yamux protocol error") + case goAwayInternalErr: + s.logger.Printf("[ERR] yamux: received internal error go away") + return fmt.Errorf("remote yamux internal error") + default: + s.logger.Printf("[ERR] yamux: received unexpected go away") + return fmt.Errorf("unexpected go away received") + } + return nil +} + +// incomingStream is used to create a new incoming stream +func (s *Session) incomingStream(id uint32) error { + // Reject immediately if we are doing a go away + if atomic.LoadInt32(&s.localGoAway) == 1 { + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) + } + + // Allocate a new stream + stream := newStream(s, id, streamSYNReceived) + + s.streamLock.Lock() + defer s.streamLock.Unlock() + + // Check if stream already exists + if _, ok := s.streams[id]; ok { + s.logger.Printf("[ERR] yamux: duplicate stream declared") + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return ErrDuplicateStream + } + + // Register the stream + s.streams[id] = stream + 
+ // Check if we've exceeded the backlog + select { + case s.acceptCh <- stream: + return nil + default: + // Backlog exceeded! RST the stream + s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") + delete(s.streams, id) + stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(stream.sendHdr) + } +} + +// closeStream is used to close a stream once both sides have +// issued a close. If there was an in-flight SYN and the stream +// was not yet established, then this will give the credit back. +func (s *Session) closeStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: SYN tracking out of sync") + } + } + delete(s.streams, id) + s.streamLock.Unlock() +} + +// establishStream is used to mark a stream that was in the +// SYN Sent state as established. +func (s *Session) establishStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + delete(s.inflight, id) + } else { + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") + } + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") + } + s.streamLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/stream.go b/vendor/github.com/hashicorp/yamux/stream.go new file mode 100644 index 0000000..d216e28 --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/stream.go @@ -0,0 +1,457 @@ +package yamux + +import ( + "bytes" + "io" + "sync" + "sync/atomic" + "time" +) + +type streamState int + +const ( + streamInit streamState = iota + streamSYNSent + streamSYNReceived + streamEstablished + streamLocalClose + streamRemoteClose + streamClosed + streamReset +) + +// Stream is used to represent a logical stream +// within a session. 
+type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline time.Time + writeDeadline time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update 
potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + if !s.readDeadline.IsZero() { + delay := s.readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. +func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body io.Reader +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = bytes.NewReader(b[:max]) + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + if !s.writeDeadline.IsZero() { + delay := s.writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any 
flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. +func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + delta := max - atomic.LoadUint32(&s.recvWindow) + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + return nil + } + + // Update our window + atomic.AddUint32(&s.recvWindow, delta) + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + 
panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + s.session.closeStream(s.id) + } + }() + + s.stateLock.Lock() + defer s.stateLock.Unlock() + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags 
uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + if remain := atomic.LoadUint32(&s.recvWindow); length > remain { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, remain, length) + return ErrRecvWindowExceeded + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + if s.recvBuf == nil { + // Allocate the receive buffer just-in-time to fit the full data frame. + // This way we can read in the whole packet without further allocations. + s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) + } + if _, err := io.Copy(s.recvBuf, conn); err != nil { + s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) + s.recvLock.Unlock() + return err + } + + // Decrement the receive window + atomic.AddUint32(&s.recvWindow, ^uint32(length-1)) + s.recvLock.Unlock() + + // Unblock any readers + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetDeadline sets the read and write deadlines +func (s *Stream) SetDeadline(t time.Time) error { + if err := s.SetReadDeadline(t); err != nil { + return err + } + if err := s.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +// SetReadDeadline sets the deadline for future Read calls. +func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline = t + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline = t + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. 
+func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/vendor/github.com/hashicorp/yamux/util.go b/vendor/github.com/hashicorp/yamux/util.go new file mode 100644 index 0000000..5fe45af --- /dev/null +++ b/vendor/github.com/hashicorp/yamux/util.go @@ -0,0 +1,28 @@ +package yamux + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/joho/godotenv/autoload/autoload.go b/vendor/github.com/joho/godotenv/autoload/autoload.go new file mode 100644 index 0000000..fbcd2bd --- /dev/null +++ b/vendor/github.com/joho/godotenv/autoload/autoload.go @@ -0,0 +1,15 @@ +package autoload + +/* + You can just read the .env file on import just by doing + + import _ "github.com/joho/godotenv/autoload" + + And bob's your mother's brother +*/ + +import "github.com/joho/godotenv" + +func init() { + godotenv.Load() +} diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go new file mode 100644 index 0000000..94b2676 --- /dev/null +++ b/vendor/github.com/joho/godotenv/godotenv.go @@ -0,0 +1,229 @@ +// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be avaiable through 
os.Getenv("SOME_ENV_VAR") +package godotenv + +import ( + "bufio" + "errors" + "os" + "os/exec" + "strings" +) + +// Load will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Load without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, false) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Overload will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main) +// +// If you call Overload without any args it will default to loading .env in the current path +// +// You can otherwise tell it which files to load (there can be more than one) like +// +// godotenv.Overload("fileone", "filetwo") +// +// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefilly set all vars. 
+func Overload(filenames ...string) (err error) { + filenames = filenamesOrDefault(filenames) + + for _, filename := range filenames { + err = loadFile(filename, true) + if err != nil { + return // return early on a spazout + } + } + return +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (envMap map[string]string, err error) { + filenames = filenamesOrDefault(filenames) + envMap = make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := readFile(filename) + + if individualErr != nil { + err = individualErr + return // return early on a spazout + } + + for key, value := range individualEnvMap { + envMap[key] = value + } + } + + return +} + +// Exec loads env vars from the specified filenames (empty map falls back to default) +// then executes the cmd specified. +// +// Simply hooks up os.Stdin/err/out to the command and calls Run() +// +// If you want more fine grained control over your command it's recommended +// that you use `Load()` or `Read()` and the `os/exec` package yourself. +func Exec(filenames []string, cmd string, cmdArgs []string) error { + Load(filenames...) + + command := exec.Command(cmd, cmdArgs...) 
+ command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + for key, value := range envMap { + if os.Getenv(key) == "" || overload { + os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + envMap = make(map[string]string) + + var lines []string + scanner := bufio.NewScanner(file) + for scanner.Scan() { + lines = append(lines, scanner.Text()) + } + + for _, fullLine := range lines { + if !isIgnoredLine(fullLine) { + key, value, err := parseLine(fullLine) + + if err == nil { + envMap[key] = value + } + } + } + return +} + +func parseLine(line string) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + // now split key from value + splitString := strings.SplitN(line, "=", 2) + + if len(splitString) != 2 { + // try yaml mode! 
+ splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("Can't separate key from value") + return + } + + // Parse the key + key = splitString[0] + if strings.HasPrefix(key, "export") { + key = strings.TrimPrefix(key, "export") + } + key = strings.Trim(key, " ") + + // Parse the value + value = splitString[1] + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values + if strings.Count(value, "\"") == 2 || strings.Count(value, "'") == 2 { + // pull the quotes off the edges + value = strings.Trim(value, "\"'") + + // expand quotes + value = strings.Replace(value, "\\\"", "\"", -1) + // expand newlines + value = strings.Replace(value, "\\n", "\n", -1) + } + + return +} + +func isIgnoredLine(line string) bool { + trimmedLine := strings.Trim(line, " \n\t") + return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") +} diff --git a/vendor/github.com/koding/logging/context.go b/vendor/github.com/koding/logging/context.go new file mode 100644 index 0000000..f8c815c --- /dev/null +++ b/vendor/github.com/koding/logging/context.go @@ -0,0 +1,86 @@ +package logging + +import "fmt" + +type context struct { + prefix string + logger +} + +// Fatal is equivalent to Critical() followed by a call to os.Exit(1). +func (c *context) Fatal(format string, args ...interface{}) { + c.logger.Fatal(c.prefixFormat()+format, args...) +} + +// Panic is equivalent to Critical() followed by a call to panic(). +func (c *context) Panic(format string, args ...interface{}) { + c.logger.Panic(c.prefixFormat()+format, args...) +} + +// Critical sends a critical level log message to the handler. Arguments are +// handled in the manner of fmt.Printf. +func (c *context) Critical(format string, args ...interface{}) { + c.logger.Critical(c.prefixFormat()+format, args...) +} + +// Error sends a error level log message to the handler. Arguments are handled +// in the manner of fmt.Printf. 
+func (c *context) Error(format string, args ...interface{}) { + c.logger.Error(c.prefixFormat()+format, args...) +} + +// Warning sends a warning level log message to the handler. Arguments are +// handled in the manner of fmt.Printf. +func (c *context) Warning(format string, args ...interface{}) { + c.logger.Warning(c.prefixFormat()+format, args...) +} + +// Notice sends a notice level log message to the handler. Arguments are +// handled in the manner of fmt.Printf. +func (c *context) Notice(format string, args ...interface{}) { + c.logger.Notice(c.prefixFormat()+format, args...) +} + +// Info sends a info level log message to the handler. Arguments are handled in +// the manner of fmt.Printf. +func (c *context) Info(format string, args ...interface{}) { + c.logger.Info(c.prefixFormat()+format, args...) +} + +// Debug sends a debug level log message to the handler. Arguments are handled +// in the manner of fmt.Printf. +func (c *context) Debug(format string, args ...interface{}) { + c.logger.Debug(c.prefixFormat()+format, args...) +} + +// New creates a new Logger from current context +func (c *context) New(prefixes ...interface{}) Logger { + return newContext(c.logger, c.prefix, prefixes...) 
+} + +func (c *context) prefixFormat() string { + return c.prefix + " " +} + +func newContext(logger logger, initial string, prefixes ...interface{}) *context { + resultPrefix := "" // resultPrefix holds prefix after initialization + connector := "" // connector holds the connector string + + for _, prefix := range prefixes { + resultPrefix += fmt.Sprintf("%s%+v", connector, prefix) + switch connector { + case "=": // if previous is `=` replace with ][ + connector = "][" + case "][": // if previous is `][` replace with = + connector = "=" + default: + connector = "=" // if its first iteration, assing = + } + } + + return &context{ + prefix: initial + "[" + resultPrefix + "]", + logger: logger, + } + +} diff --git a/vendor/github.com/koding/logging/custom.go b/vendor/github.com/koding/logging/custom.go new file mode 100644 index 0000000..63a5e8c --- /dev/null +++ b/vendor/github.com/koding/logging/custom.go @@ -0,0 +1,40 @@ +package logging + +import ( + "fmt" + "os" + "strings" +) + +type CustomFormatter struct{} + +func (f *CustomFormatter) Format(rec *Record) string { + paths := strings.Split(rec.Filename, string(os.PathSeparator)) + // does even anyone uses root folder as their gopath? 
+ filePath := strings.Join(paths[len(paths)-2:], string(os.PathSeparator)) + + return fmt.Sprintf("%-24s %-8s [%-15s][PID:%d][%s:%d] %s", + rec.Time.UTC().Format("2006-01-02T15:04:05.999Z"), + LevelNames[rec.Level], + rec.LoggerName, + rec.ProcessID, + filePath, + rec.Line, + fmt.Sprintf(rec.Format, rec.Args...), + ) +} + +func NewCustom(name string, debug bool) Logger { + log := NewLogger(name) + logHandler := NewWriterHandler(os.Stderr) + logHandler.Formatter = &CustomFormatter{} + logHandler.Colorize = true + log.SetHandler(logHandler) + + if debug { + log.SetLevel(DEBUG) + logHandler.SetLevel(DEBUG) + } + + return log +} diff --git a/vendor/github.com/koding/logging/logging.go b/vendor/github.com/koding/logging/logging.go new file mode 100644 index 0000000..f32273d --- /dev/null +++ b/vendor/github.com/koding/logging/logging.go @@ -0,0 +1,475 @@ +// Package logging is an alternative to log package in standard library. +package logging + +import ( + "fmt" + "io" + "os" + "path/filepath" + "runtime" + "strings" + "sync" + "time" +) + +type ( + // Color represents log level colors + Color int + + // Level represent severity of logs + Level int +) + +// Colors for different log levels. +const ( + BLACK Color = (iota + 30) + RED + GREEN + YELLOW + BLUE + MAGENTA + CYAN + WHITE +) + +// Logging levels. 
+const ( + CRITICAL Level = iota + ERROR + WARNING + NOTICE + INFO + DEBUG +) + +// LevelNames provides mapping for log levels +var LevelNames = map[Level]string{ + CRITICAL: "CRITICAL", + ERROR: "ERROR", + WARNING: "WARNING", + NOTICE: "NOTICE", + INFO: "INFO", + DEBUG: "DEBUG", +} + +// LevelColors provides mapping for log colors +var LevelColors = map[Level]Color{ + CRITICAL: MAGENTA, + ERROR: RED, + WARNING: YELLOW, + NOTICE: GREEN, + INFO: WHITE, + DEBUG: CYAN, +} + +var ( + // DefaultLogger holds default logger + DefaultLogger Logger = NewLogger(procName()) + + // DefaultLevel holds default value for loggers + DefaultLevel Level = INFO + + // DefaultHandler holds default handler for loggers + DefaultHandler Handler = StderrHandler + + // DefaultFormatter holds default formatter for loggers + DefaultFormatter Formatter = &defaultFormatter{} + + // StdoutHandler holds a handler with outputting to stdout + StdoutHandler = NewWriterHandler(os.Stdout) + + // StderrHandler holds a handler with outputting to stderr + StderrHandler = NewWriterHandler(os.Stderr) +) + +// Logger is the interface for outputing log messages in different levels. +// A new Logger can be created with NewLogger() function. +// You can changed the output handler with SetHandler() function. +type Logger interface { + // SetLevel changes the level of the logger. Default is logging.Info. + SetLevel(Level) + + // SetHandler replaces the current handler for output. Default is logging.StderrHandler. + SetHandler(Handler) + + // SetCallDepth sets the parameter passed to runtime.Caller(). + // It is used to get the file name from call stack. + // For example you need to set it to 1 if you are using a wrapper around + // the Logger. Default value is zero. + SetCallDepth(int) + + // New creates a new inerhited context logger with given prefixes. + New(prefixes ...interface{}) Logger + + // Fatal is equivalent to l.Critical followed by a call to os.Exit(1). 
+ Fatal(format string, args ...interface{}) + + // Panic is equivalent to l.Critical followed by a call to panic(). + Panic(format string, args ...interface{}) + + // Critical logs a message using CRITICAL as log level. + Critical(format string, args ...interface{}) + + // Error logs a message using ERROR as log level. + Error(format string, args ...interface{}) + + // Warning logs a message using WARNING as log level. + Warning(format string, args ...interface{}) + + // Notice logs a message using NOTICE as log level. + Notice(format string, args ...interface{}) + + // Info logs a message using INFO as log level. + Info(format string, args ...interface{}) + + // Debug logs a message using DEBUG as log level. + Debug(format string, args ...interface{}) +} + +// Handler handles the output. +type Handler interface { + SetFormatter(Formatter) + SetLevel(Level) + + // Handle single log record. + Handle(*Record) + + // Close the handler. + Close() +} + +// Record contains all of the information about a single log message. +type Record struct { + Format string // Format string + Args []interface{} // Arguments to format string + LoggerName string // Name of the logger module + Level Level // Level of the record + Time time.Time // Time of the record (local time) + Filename string // File name of the log call (absolute path) + Line int // Line number in file + ProcessID int // PID + ProcessName string // Name of the process +} + +// Formatter formats a record. +type Formatter interface { + // Format the record and return a message. 
+ Format(*Record) (message string) +} + +/////////////////////// +// // +// Default Formatter // +// // +/////////////////////// + +type defaultFormatter struct{} + +// Format outputs a message like "2014-02-28 18:15:57 [example] INFO something happened" +func (f *defaultFormatter) Format(rec *Record) string { + return fmt.Sprintf("%s [%s] %-8s %s", fmt.Sprint(rec.Time)[:19], rec.LoggerName, LevelNames[rec.Level], fmt.Sprintf(rec.Format, rec.Args...)) +} + +/////////////////////////// +// // +// Logger implementation // +// // +/////////////////////////// + +// logger is the default Logger implementation. +type logger struct { + Name string + Level Level + Handler Handler + calldepth int +} + +// NewLogger returns a new Logger implementation. Do not forget to close it at exit. +func NewLogger(name string) Logger { + return &logger{ + Name: name, + Level: DefaultLevel, + Handler: DefaultHandler, + } +} + +// New creates a new inerhited logger with the given prefixes +func (l *logger) New(prefixes ...interface{}) Logger { + return newContext(*l, "", prefixes...) +} + +func (l *logger) SetLevel(level Level) { + l.Level = level +} + +func (l *logger) SetHandler(b Handler) { + l.Handler = b +} + +func (l *logger) SetCallDepth(n int) { + l.calldepth = n +} + +// Fatal is equivalent to Critical() followed by a call to os.Exit(1). +func (l *logger) Fatal(format string, args ...interface{}) { + l.Critical(format, args...) + l.Handler.Close() + os.Exit(1) +} + +// Panic is equivalent to Critical() followed by a call to panic(). +func (l *logger) Panic(format string, args ...interface{}) { + l.Critical(format, args...) + panic(fmt.Sprintf(format, args...)) +} + +// Critical sends a critical level log message to the handler. Arguments are handled in the manner of fmt.Printf. +func (l *logger) Critical(format string, args ...interface{}) { + if l.Level >= CRITICAL { + l.log(CRITICAL, format, args...) + } +} + +// Error sends a error level log message to the handler. 
Arguments are handled in the manner of fmt.Printf. +func (l *logger) Error(format string, args ...interface{}) { + if l.Level >= ERROR { + l.log(ERROR, format, args...) + } +} + +// Warning sends a warning level log message to the handler. Arguments are handled in the manner of fmt.Printf. +func (l *logger) Warning(format string, args ...interface{}) { + if l.Level >= WARNING { + l.log(WARNING, format, args...) + } +} + +// Notice sends a notice level log message to the handler. Arguments are handled in the manner of fmt.Printf. +func (l *logger) Notice(format string, args ...interface{}) { + if l.Level >= NOTICE { + l.log(NOTICE, format, args...) + } +} + +// Info sends a info level log message to the handler. Arguments are handled in the manner of fmt.Printf. +func (l *logger) Info(format string, args ...interface{}) { + if l.Level >= INFO { + l.log(INFO, format, args...) + } +} + +// Debug sends a debug level log message to the handler. Arguments are handled in the manner of fmt.Printf. +func (l *logger) Debug(format string, args ...interface{}) { + if l.Level >= DEBUG { + l.log(DEBUG, format, args...) + } +} + +func (l *logger) log(level Level, format string, args ...interface{}) { + // Add missing newline at the end. + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + + _, file, line, ok := runtime.Caller(l.calldepth + 2) + if !ok { + file = "???" + line = 0 + } + + rec := &Record{ + Format: format, + Args: args, + LoggerName: l.Name, + Level: level, + Time: time.Now(), + Filename: file, + Line: line, + ProcessName: procName(), + ProcessID: os.Getpid(), + } + + l.Handler.Handle(rec) +} + +// procName returns the name of the current process. +func procName() string { return filepath.Base(os.Args[0]) } + +/////////////////// +// // +// DefaultLogger // +// // +/////////////////// + +// Fatal is equivalent to Critical() followed by a call to os.Exit(1). +func Fatal(format string, args ...interface{}) { + DefaultLogger.Fatal(format, args...) 
+} + +// Panic is equivalent to Critical() followed by a call to panic(). +func Panic(format string, args ...interface{}) { + DefaultLogger.Panic(format, args...) +} + +// Critical prints a critical level log message to the stderr. Arguments are handled in the manner of fmt.Printf. +func Critical(format string, args ...interface{}) { + DefaultLogger.Critical(format, args...) +} + +// Error prints a error level log message to the stderr. Arguments are handled in the manner of fmt.Printf. +func Error(format string, args ...interface{}) { + DefaultLogger.Error(format, args...) +} + +// Warning prints a warning level log message to the stderr. Arguments are handled in the manner of fmt.Printf. +func Warning(format string, args ...interface{}) { + DefaultLogger.Warning(format, args...) +} + +// Notice prints a notice level log message to the stderr. Arguments are handled in the manner of fmt.Printf. +func Notice(format string, args ...interface{}) { + DefaultLogger.Notice(format, args...) +} + +// Info prints a info level log message to the stderr. Arguments are handled in the manner of fmt.Printf. +func Info(format string, args ...interface{}) { + DefaultLogger.Info(format, args...) +} + +// Debug prints a debug level log message to the stderr. Arguments are handled in the manner of fmt.Printf. +func Debug(format string, args ...interface{}) { + DefaultLogger.Debug(format, args...) 
+} + +///////////////// +// // +// BaseHandler // +// // +///////////////// + +// BaseHandler provides basic functionality for handler +type BaseHandler struct { + Level Level + Formatter Formatter +} + +// NewBaseHandler creates a newBaseHandler with default values +func NewBaseHandler() *BaseHandler { + return &BaseHandler{ + Level: DefaultLevel, + Formatter: DefaultFormatter, + } +} + +// SetLevel sets logging level for handler +func (h *BaseHandler) SetLevel(l Level) { + h.Level = l +} + +// SetFormatter sets logging formatter for handler +func (h *BaseHandler) SetFormatter(f Formatter) { + h.Formatter = f +} + +// FilterAndFormat filters any record according to loggging level +func (h *BaseHandler) FilterAndFormat(rec *Record) string { + if h.Level >= rec.Level { + return h.Formatter.Format(rec) + } + return "" +} + +/////////////////// +// // +// WriterHandler // +// // +/////////////////// + +// WriterHandler is a handler implementation that writes the logging output to a io.Writer. +type WriterHandler struct { + *BaseHandler + w io.Writer + Colorize bool +} + +// NewWriterHandler creates a new writer handler with given io.Writer +func NewWriterHandler(w io.Writer) *WriterHandler { + return &WriterHandler{ + BaseHandler: NewBaseHandler(), + w: w, + } +} + +// Handle writes any given Record to the Writer. +func (b *WriterHandler) Handle(rec *Record) { + message := b.BaseHandler.FilterAndFormat(rec) + if message == "" { + return + } + if b.Colorize { + b.w.Write([]byte(fmt.Sprintf("\033[%dm", LevelColors[rec.Level]))) + } + fmt.Fprint(b.w, message) + if b.Colorize { + b.w.Write([]byte("\033[0m")) // reset color + } +} + +// Close closes WriterHandler +func (b *WriterHandler) Close() {} + +////////////////// +// // +// MultiHandler // +// // +////////////////// + +// MultiHandler sends the log output to multiple handlers concurrently. 
+type MultiHandler struct { + handlers []Handler +} + +// NewMultiHandler creates a new handler with given handlers +func NewMultiHandler(handlers ...Handler) *MultiHandler { + return &MultiHandler{handlers: handlers} +} + +// SetFormatter sets formatter for all handlers +func (b *MultiHandler) SetFormatter(f Formatter) { + for _, h := range b.handlers { + h.SetFormatter(f) + } +} + +// SetLevel sets level for all handlers +func (b *MultiHandler) SetLevel(l Level) { + for _, h := range b.handlers { + h.SetLevel(l) + } +} + +// Handle handles given record with all handlers concurrently +func (b *MultiHandler) Handle(rec *Record) { + wg := sync.WaitGroup{} + wg.Add(len(b.handlers)) + for _, handler := range b.handlers { + go func(handler Handler) { + handler.Handle(rec) + wg.Done() + }(handler) + } + wg.Wait() +} + +// Close closes all handlers concurrently +func (b *MultiHandler) Close() { + wg := sync.WaitGroup{} + wg.Add(len(b.handlers)) + for _, handler := range b.handlers { + go func(handler Handler) { + handler.Close() + wg.Done() + }(handler) + } + wg.Wait() +} diff --git a/vendor/github.com/koding/logging/logging_unix.go b/vendor/github.com/koding/logging/logging_unix.go new file mode 100644 index 0000000..17f8188 --- /dev/null +++ b/vendor/github.com/koding/logging/logging_unix.go @@ -0,0 +1,8 @@ +// +build darwin freebsd linux netbsd openbsd + +package logging + +func init() { + StdoutHandler.Colorize = true + StderrHandler.Colorize = true +} diff --git a/vendor/github.com/koding/logging/sink.go b/vendor/github.com/koding/logging/sink.go new file mode 100644 index 0000000..500975e --- /dev/null +++ b/vendor/github.com/koding/logging/sink.go @@ -0,0 +1,81 @@ +package logging + +import ( + "fmt" + "os" + "sync" +) + +///////////////// +// // +// SinkHandler // +// // +///////////////// + +// SinkHandler sends log records to buffered channel, the logs are written in a dedicated routine consuming the channel. 
+type SinkHandler struct { + inner Handler + sinkCh chan *Record + bufSize int + wg sync.WaitGroup +} + +// NewSinkHandler creates SinkHandler with sink channel buffer size bufSize that wraps inner handler for writing logs. +// When SinkHandler is created a go routine is started. When not used always call Close to terminate go routine. +func NewSinkHandler(inner Handler, bufSize int) *SinkHandler { + b := &SinkHandler{ + inner: inner, + sinkCh: make(chan *Record, bufSize), + bufSize: bufSize, + } + + b.wg.Add(1) + go b.process() + + return b +} + +// process reads log records from sinkCh and calls inner log handler to write it. +func (b *SinkHandler) process() { + for { + rec, ok := <-b.sinkCh + if !ok { + b.inner.Close() + break + } + + b.inner.Handle(rec) + } + b.wg.Done() +} + +// Status reports sink capacity and length. +func (b *SinkHandler) Status() (int, int) { + return b.bufSize, len(b.sinkCh) +} + +// SetLevel sets logging level for handler +func (b *SinkHandler) SetLevel(l Level) { + b.inner.SetLevel(l) +} + +// SetFormatter sets logging formatter for handler +func (b *SinkHandler) SetFormatter(f Formatter) { + b.inner.SetFormatter(f) +} + +// Handle puts rec to the sink. +func (b *SinkHandler) Handle(rec *Record) { + select { + case b.sinkCh <- rec: + default: + fmt.Fprintf(os.Stderr, "SinkHandler buffer too small dropping record\n") + } +} + +// Close closes the sink channel, inner handler will be closed when all pending logs are processed. +// Close blocks until all the logs are processed. 
+func (b *SinkHandler) Close() { + close(b.sinkCh) + b.wg.Wait() +} diff --git a/vendor/github.com/koding/logging/syslog.go b/vendor/github.com/koding/logging/syslog.go new file mode 100644 index 0000000..a8cb117 --- /dev/null +++ b/vendor/github.com/koding/logging/syslog.go @@ -0,0 +1,60 @@ +// +build !windows,!plan9 + +package logging + +import ( + "log/syslog" +) + +/////////////////// +// // +// SyslogHandler // +// // +/////////////////// + +// SyslogHandler sends the logging output to syslog. +type SyslogHandler struct { + *BaseHandler + w *syslog.Writer +} + +func NewSyslogHandler(tag string) (*SyslogHandler, error) { + // Priority in New constructor is not important here because we + // do not use w.Write() directly. + w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_USER, tag) + if err != nil { + return nil, err + } + return &SyslogHandler{ + BaseHandler: NewBaseHandler(), + w: w, + }, nil +} + +func (b *SyslogHandler) Handle(rec *Record) { + message := b.BaseHandler.FilterAndFormat(rec) + if message == "" { + return + } + + var fn func(string) error + switch rec.Level { + case CRITICAL: + fn = b.w.Crit + case ERROR: + fn = b.w.Err + case WARNING: + fn = b.w.Warning + case NOTICE: + fn = b.w.Notice + case INFO: + fn = b.w.Info + case DEBUG: + fn = b.w.Debug + } + fn(message) +} + +func (b *SyslogHandler) Close() { + b.w.Close() +} diff --git a/vendor/github.com/sycamoreone/orc/tor/config.go b/vendor/github.com/sycamoreone/orc/tor/config.go new file mode 100644 index 0000000..f41e79f --- /dev/null +++ b/vendor/github.com/sycamoreone/orc/tor/config.go @@ -0,0 +1,80 @@ +package tor + +import ( + "time" + "errors" + "fmt" + "strings" + "strconv" +) + +// A Config struct is used to configure a to be executed Tor process. +type Config struct { + // Path is the path to a tor executable to be run. If path is the empty string, + // $PATH is used to search for a tor executable. 
+ Path string + // Timeout is the maximum amount of time we will wait for + // a connect to the Tor network to complete. + Timeout time.Duration + // Options is a map of configuration options to values to be used + // as command line arguments or in a torrc configuration file. + Options map[string]string + err error +} + +func NewConfig() *Config { + c := &Config{ + Path: "", + Options: make(map[string]string), + err: nil, + } + return c +} + +func (c *Config) setErr(format string, a ...interface{}) { + err := errors.New(fmt.Sprintf(format, a...)) + if c.err == nil { + c.err = err + } +} + +func (c *Config) Set(option string, value interface{}) { + switch v := value.(type) { + case int: + c.Options[option] = strconv.Itoa(v) + case string: + c.Options[option] = dquote(v) + default: + c.setErr("value %v for option %s is not a string or int", value, option) + } +} + +// dquote returns s quoted in double-quotes, if it isn't already quoted and contains a space. +// Otherwise it just returns s itself. +func dquote(s string) string { + if s[0] == '"' && s[len(s)-1] == '"' { + // TODO check if there is a " in between the quotes that is not escaped using \ + return s + } + if strings.ContainsRune(s, ' ') { + return "\"" + s + "\"" + } + return s +} + +// Err reports the first error that was encountered during the preceding calls to Set() +// and clears the saved error value to nil. +func (c *Config) Err() error { + err := c.err + c.err = nil + return err +} + +func (c Config) ToCmdLineFormat() []string { + args := make([]string, 0) + for k, v := range c.Options { + args = append(args, "--"+k) + args = append(args, v) + } + return args +} diff --git a/vendor/github.com/sycamoreone/orc/tor/exec.go b/vendor/github.com/sycamoreone/orc/tor/exec.go new file mode 100644 index 0000000..b5625cf --- /dev/null +++ b/vendor/github.com/sycamoreone/orc/tor/exec.go @@ -0,0 +1,74 @@ +// Package tor supplies helper functions to start a tor binary as a slave process. 
+package tor + +import ( + "time" + "os/exec" + "io" + "bufio" + "strings" + "errors" +) + +// Cmd represents an tor executable to be run as a slave process. +type Cmd struct { + Config *Config + cmd *exec.Cmd + stdout io.ReadCloser +} + +// NewCmd returns a Cmd to run a tor process using the configuration values in config. +func NewCmd(config *Config) (*Cmd, error) { + if config.Path == "" { + file, err := exec.LookPath("tor") + if err != nil { + return nil, err + } + config.Path = file + } + + cmd := exec.Command(config.Path, config.ToCmdLineFormat()...) + stdout, err := cmd.StdoutPipe() + if err != nil { + return nil, err + } + return &Cmd{ + Config: config, + cmd: cmd, + stdout: stdout, + }, nil +} + +func (c *Cmd) Start() error { + deadline := time.Now().Add(c.Config.Timeout) + err := c.cmd.Start() + if err != nil { + return err + } + + // Read output until one gets a "Bootstrapped 100%: Done" notice. + buf := bufio.NewReader(c.stdout) + line, err := buf.ReadString('\n') + for err == nil { + if time.Now().After(deadline) { + _ = c.cmd.Process.Kill() + return errors.New("orc/tor: process killed because of timeout") + } + if strings.Contains(line, "Bootstrapped 100%: Done") { + break + } + line, err = buf.ReadString('\n') + } + return nil +} + +func (c *Cmd) Wait() error { + return c.cmd.Wait() +} + +func (c *Cmd) KillUnlessExited() { + if c.cmd.ProcessState.Exited() { + return + } + c.cmd.Process.Kill() +} \ No newline at end of file diff --git a/vendor/github.com/yawning/bulb/utils/pkcs1/rsa.go b/vendor/github.com/yawning/bulb/utils/pkcs1/rsa.go new file mode 100644 index 0000000..beb740e --- /dev/null +++ b/vendor/github.com/yawning/bulb/utils/pkcs1/rsa.go @@ -0,0 +1,101 @@ +// +// rsa.go - PKCS#1 RSA key related helpers. +// +// To the extent possible under law, Yawning Angel has waived all copyright and +// related or neighboring rights to bulb, using the creative commons +// "cc0" public domain dedication. See LICENSE or +// for full details. 
+ +// Package pkcs1 implements PKCS#1 RSA key marshalling/unmarshalling, +// compatibile with Tor's usage. +package pkcs1 + +import ( + "crypto/rsa" + "crypto/sha1" + "encoding/asn1" + "encoding/base32" + "math/big" + "strings" +) + +type pkcs1RSAPrivKey struct { + Version int // version + N *big.Int // modulus + E int // publicExponent + D *big.Int // privateExponent + P *big.Int // prime1 + Q *big.Int // prime2 + Dp *big.Int // exponent1: d mod (p-1) + Dq *big.Int // exponent2: d mod (q-1) + Qinv *big.Int // coefficient: (inverse of q) mod p +} + +// EncodePrivateKeyDER returns the PKCS#1 DER encoding of a rsa.PrivateKey. +func EncodePrivateKeyDER(sk *rsa.PrivateKey) ([]byte, error) { + // The crypto.RSA structure has a slightly different layout than PKCS#1 + // private keys, so directly marshaling does not work. Pull out the values + // into a strucuture with the correct layout and marshal. + sk.Precompute() // Ensure that the structure is fully populated. + k := pkcs1RSAPrivKey{ + Version: 0, + N: sk.N, + E: sk.E, + D: sk.D, + P: sk.Primes[0], + Q: sk.Primes[1], + Dp: sk.Precomputed.Dp, + Dq: sk.Precomputed.Dq, + Qinv: sk.Precomputed.Qinv, + } + return asn1.Marshal(k) +} + +// DecodePrivateKeyDER returns the rsa.PrivateKey decoding of a PKCS#1 DER blob. +func DecodePrivateKeyDER(b []byte) (*rsa.PrivateKey, []byte, error) { + var k pkcs1RSAPrivKey + rest, err := asn1.Unmarshal(b, &k) + if err == nil { + sk := &rsa.PrivateKey{} + sk.Primes = make([]*big.Int, 2) + sk.N = k.N + sk.E = k.E + sk.D = k.D + sk.Primes[0] = k.P + sk.Primes[1] = k.Q + + // Ignore the precomputed values and just rederive them. + sk.Precompute() + return sk, rest, nil + } + return nil, rest, err +} + +// EncodePublicKeyDER returns the PKCS#1 DER encoding of a rsa.PublicKey. +func EncodePublicKeyDER(pk *rsa.PublicKey) ([]byte, error) { + // The crypto.RSA structure is exactly the same as the PCKS#1 public keys, + // when the encoding/asn.1 marshaller is done with it. 
+ // + // DER encoding of (SEQUENCE | INTEGER(n) | INTEGER(e)) + return asn1.Marshal(*pk) +} + +// DecodePublicKeyDER returns the rsa.PublicKey decoding of a PKCS#1 DER blob. +func DecodePublicKeyDER(b []byte) (*rsa.PublicKey, []byte, error) { + pk := &rsa.PublicKey{} + rest, err := asn1.Unmarshal(b, pk) + return pk, rest, err +} + +// OnionAddr returns the Tor Onion Service address corresponding to a given +// rsa.PublicKey. +func OnionAddr(pk *rsa.PublicKey) (string, error) { + der, err := EncodePublicKeyDER(pk) + if err != nil { + return "", err + } + h := sha1.Sum(der) + hb32 := base32.StdEncoding.EncodeToString(h[:10]) + + return strings.ToLower(hb32), nil +} diff --git a/vendor/github.com/yawning/bulb/utils/utils.go b/vendor/github.com/yawning/bulb/utils/utils.go new file mode 100644 index 0000000..d741a8b --- /dev/null +++ b/vendor/github.com/yawning/bulb/utils/utils.go @@ -0,0 +1,81 @@ +// utils.go - A grab bag of useful utilitiy functions. +// +// To the extent possible under law, Yawning Angel waived all copyright +// and related or neighboring rights to bulb, using the creative +// commons "cc0" public domain dedication. See LICENSE or +// for full details. + +// Package utils implements useful utilities for dealing with Tor and it's +// control port. +package utils + +import ( + "net" + "net/url" + "strconv" +) + +// SplitQuoted splits s by sep if it is found outside substring +// quoted by quote. +func SplitQuoted(s string, quote, sep rune) (splitted []string) { + quoteFlag := false +NewSubstring: + for i, c := range s { + if c == quote { + quoteFlag = !quoteFlag + } + if c == sep && !quoteFlag { + splitted = append(splitted, s[:i]) + s = s[i+1:] + goto NewSubstring + } + } + return append(splitted, s) +} + +// ParseControlPortString parses a string representation of a control port +// address into a network/address string pair suitable for use with "dial". 
+// +// Valid string representations are: +// * tcp://address:port +// * unix://path +// * port (Translates to tcp://127.0.0.1:port) +func ParseControlPortString(raw string) (network, addr string, err error) { + // Try parsing it as a naked port. + if _, err = strconv.ParseUint(raw, 10, 16); err == nil { + raw = "tcp://127.0.0.1:" + raw + } + + // Ok, parse/validate the URI. + uri, err := url.Parse(raw) + if err != nil { + return "", "", err + } + if uri.Opaque != "" || uri.RawQuery != "" || uri.Fragment != "" { + return "", "", net.InvalidAddrError("uri has Opaque/Query/Fragment") + } + switch uri.Scheme { + case "tcp": + if uri.Path != "" { + return "", "", net.InvalidAddrError("tcp uri has a path") + } + tcpAddr, err := net.ResolveTCPAddr(uri.Scheme, uri.Host) + if err != nil { + return "", "", err + } + if tcpAddr.Port == 0 { + return "", "", net.InvalidAddrError("tcp uri is missing a port") + } + return uri.Scheme, uri.Host, nil + case "unix": + if uri.Host != "" { + return "", "", net.InvalidAddrError("unix uri has a host") + } + _, err := net.ResolveUnixAddr(uri.Scheme, uri.Path) + if err != nil { + return "", "", err + } + return uri.Scheme, uri.Path, nil + } + return "", "", net.InvalidAddrError("unknown scheme: " + uri.Scheme) +} diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go new file mode 100644 index 0000000..593f653 --- /dev/null +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2.go @@ -0,0 +1,77 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* +Package pbkdf2 implements the key derivation function PBKDF2 as defined in RFC +2898 / PKCS #5 v2.0. + +A key derivation function is useful when encrypting data based on a password +or any other not-fully-random data. It uses a pseudorandom function to derive +a secure encryption key based on the password. 
+ +While v2.0 of the standard defines only one pseudorandom function to use, +HMAC-SHA1, the drafted v2.1 specification allows use of all five FIPS Approved +Hash Functions SHA-1, SHA-224, SHA-256, SHA-384 and SHA-512 for HMAC. To +choose, you can pass the `New` functions from the different SHA packages to +pbkdf2.Key. +*/ +package pbkdf2 // import "golang.org/x/crypto/pbkdf2" + +import ( + "crypto/hmac" + "hash" +) + +// Key derives a key from the password, salt and iteration count, returning a +// []byte of length keylen that can be used as cryptographic key. The key is +// derived based on the method described as PBKDF2 with the HMAC variant using +// the supplied hash function. +// +// For example, to use a HMAC-SHA-1 based PBKDF2 key derivation function, you +// can get a derived key for e.g. AES-256 (which needs a 32-byte key) by +// doing: +// +// dk := pbkdf2.Key([]byte("some password"), salt, 4096, 32, sha1.New) +// +// Remember to get a good random salt. At least 8 bytes is recommended by the +// RFC. +// +// Using a higher iteration count will increase the cost of an exhaustive +// search but will also make derivation proportionally slower. +func Key(password, salt []byte, iter, keyLen int, h func() hash.Hash) []byte { + prf := hmac.New(h, password) + hashLen := prf.Size() + numBlocks := (keyLen + hashLen - 1) / hashLen + + var buf [4]byte + dk := make([]byte, 0, numBlocks*hashLen) + U := make([]byte, hashLen) + for block := 1; block <= numBlocks; block++ { + // N.B.: || means concatenation, ^ means XOR + // for each block T_i = U_1 ^ U_2 ^ ... 
^ U_iter + // U_1 = PRF(password, salt || uint(i)) + prf.Reset() + prf.Write(salt) + buf[0] = byte(block >> 24) + buf[1] = byte(block >> 16) + buf[2] = byte(block >> 8) + buf[3] = byte(block) + prf.Write(buf[:4]) + dk = prf.Sum(dk) + T := dk[len(dk)-hashLen:] + copy(U, T) + + // U_n = PRF(password, U_(n-1)) + for n := 2; n <= iter; n++ { + prf.Reset() + prf.Write(U) + U = U[:0] + U = prf.Sum(U) + for x := range U { + T[x] ^= U[x] + } + } + } + return dk[:keyLen] +} diff --git a/vendor/golang.org/x/net/proxy/direct.go b/vendor/golang.org/x/net/proxy/direct.go new file mode 100644 index 0000000..4c5ad88 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/direct.go @@ -0,0 +1,18 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" +) + +type direct struct{} + +// Direct is a direct proxy: one that makes network connections directly. +var Direct = direct{} + +func (direct) Dial(network, addr string) (net.Conn, error) { + return net.Dial(network, addr) +} diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go new file mode 100644 index 0000000..f540b19 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/per_host.go @@ -0,0 +1,140 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "net" + "strings" +) + +// A PerHost directs connections to a default Dialer unless the hostname +// requested matches one of a number of exceptions. 
+type PerHost struct { + def, bypass Dialer + + bypassNetworks []*net.IPNet + bypassIPs []net.IP + bypassZones []string + bypassHosts []string +} + +// NewPerHost returns a PerHost Dialer that directs connections to either +// defaultDialer or bypass, depending on whether the connection matches one of +// the configured rules. +func NewPerHost(defaultDialer, bypass Dialer) *PerHost { + return &PerHost{ + def: defaultDialer, + bypass: bypass, + } +} + +// Dial connects to the address addr on the given network through either +// defaultDialer or bypass. +func (p *PerHost) Dial(network, addr string) (c net.Conn, err error) { + host, _, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + return p.dialerForRequest(host).Dial(network, addr) +} + +func (p *PerHost) dialerForRequest(host string) Dialer { + if ip := net.ParseIP(host); ip != nil { + for _, net := range p.bypassNetworks { + if net.Contains(ip) { + return p.bypass + } + } + for _, bypassIP := range p.bypassIPs { + if bypassIP.Equal(ip) { + return p.bypass + } + } + return p.def + } + + for _, zone := range p.bypassZones { + if strings.HasSuffix(host, zone) { + return p.bypass + } + if host == zone[1:] { + // For a zone "example.com", we match "example.com" + // too. + return p.bypass + } + } + for _, bypassHost := range p.bypassHosts { + if bypassHost == host { + return p.bypass + } + } + return p.def +} + +// AddFromString parses a string that contains comma-separated values +// specifying hosts that should use the bypass proxy. Each value is either an +// IP address, a CIDR range, a zone (*.example.com) or a hostname +// (localhost). A best effort is made to parse the string and errors are +// ignored. 
+func (p *PerHost) AddFromString(s string) { + hosts := strings.Split(s, ",") + for _, host := range hosts { + host = strings.TrimSpace(host) + if len(host) == 0 { + continue + } + if strings.Contains(host, "/") { + // We assume that it's a CIDR address like 127.0.0.0/8 + if _, net, err := net.ParseCIDR(host); err == nil { + p.AddNetwork(net) + } + continue + } + if ip := net.ParseIP(host); ip != nil { + p.AddIP(ip) + continue + } + if strings.HasPrefix(host, "*.") { + p.AddZone(host[1:]) + continue + } + p.AddHost(host) + } +} + +// AddIP specifies an IP address that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match an IP. +func (p *PerHost) AddIP(ip net.IP) { + p.bypassIPs = append(p.bypassIPs, ip) +} + +// AddNetwork specifies an IP range that will use the bypass proxy. Note that +// this will only take effect if a literal IP address is dialed. A connection +// to a named host will never match. +func (p *PerHost) AddNetwork(net *net.IPNet) { + p.bypassNetworks = append(p.bypassNetworks, net) +} + +// AddZone specifies a DNS suffix that will use the bypass proxy. A zone of +// "example.com" matches "example.com" and all of its subdomains. +func (p *PerHost) AddZone(zone string) { + if strings.HasSuffix(zone, ".") { + zone = zone[:len(zone)-1] + } + if !strings.HasPrefix(zone, ".") { + zone = "." + zone + } + p.bypassZones = append(p.bypassZones, zone) +} + +// AddHost specifies a hostname that will use the bypass proxy. +func (p *PerHost) AddHost(host string) { + if strings.HasSuffix(host, ".") { + host = host[:len(host)-1] + } + p.bypassHosts = append(p.bypassHosts, host) +} diff --git a/vendor/golang.org/x/net/proxy/proxy.go b/vendor/golang.org/x/net/proxy/proxy.go new file mode 100644 index 0000000..78a8b7b --- /dev/null +++ b/vendor/golang.org/x/net/proxy/proxy.go @@ -0,0 +1,94 @@ +// Copyright 2011 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package proxy provides support for a variety of protocols to proxy network +// data. +package proxy // import "golang.org/x/net/proxy" + +import ( + "errors" + "net" + "net/url" + "os" +) + +// A Dialer is a means to establish a connection. +type Dialer interface { + // Dial connects to the given address via the proxy. + Dial(network, addr string) (c net.Conn, err error) +} + +// Auth contains authentication parameters that specific Dialers may require. +type Auth struct { + User, Password string +} + +// FromEnvironment returns the dialer specified by the proxy related variables in +// the environment. +func FromEnvironment() Dialer { + allProxy := os.Getenv("all_proxy") + if len(allProxy) == 0 { + return Direct + } + + proxyURL, err := url.Parse(allProxy) + if err != nil { + return Direct + } + proxy, err := FromURL(proxyURL, Direct) + if err != nil { + return Direct + } + + noProxy := os.Getenv("no_proxy") + if len(noProxy) == 0 { + return proxy + } + + perHost := NewPerHost(proxy, Direct) + perHost.AddFromString(noProxy) + return perHost +} + +// proxySchemes is a map from URL schemes to a function that creates a Dialer +// from a URL with such a scheme. +var proxySchemes map[string]func(*url.URL, Dialer) (Dialer, error) + +// RegisterDialerType takes a URL scheme and a function to generate Dialers from +// a URL with that scheme and a forwarding Dialer. Registered schemes are used +// by FromURL. +func RegisterDialerType(scheme string, f func(*url.URL, Dialer) (Dialer, error)) { + if proxySchemes == nil { + proxySchemes = make(map[string]func(*url.URL, Dialer) (Dialer, error)) + } + proxySchemes[scheme] = f +} + +// FromURL returns a Dialer given a URL specification and an underlying +// Dialer for it to make network requests. 
+func FromURL(u *url.URL, forward Dialer) (Dialer, error) { + var auth *Auth + if u.User != nil { + auth = new(Auth) + auth.User = u.User.Username() + if p, ok := u.User.Password(); ok { + auth.Password = p + } + } + + switch u.Scheme { + case "socks5": + return SOCKS5("tcp", u.Host, auth, forward) + } + + // If the scheme doesn't match any of the built-in schemes, see if it + // was registered by another package. + if proxySchemes != nil { + if f, ok := proxySchemes[u.Scheme]; ok { + return f(u, forward) + } + } + + return nil, errors.New("proxy: unknown scheme: " + u.Scheme) +} diff --git a/vendor/golang.org/x/net/proxy/socks5.go b/vendor/golang.org/x/net/proxy/socks5.go new file mode 100644 index 0000000..9b96282 --- /dev/null +++ b/vendor/golang.org/x/net/proxy/socks5.go @@ -0,0 +1,210 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package proxy + +import ( + "errors" + "io" + "net" + "strconv" +) + +// SOCKS5 returns a Dialer that makes SOCKSv5 connections to the given address +// with an optional username and password. See RFC 1928. 
+func SOCKS5(network, addr string, auth *Auth, forward Dialer) (Dialer, error) { + s := &socks5{ + network: network, + addr: addr, + forward: forward, + } + if auth != nil { + s.user = auth.User + s.password = auth.Password + } + + return s, nil +} + +type socks5 struct { + user, password string + network, addr string + forward Dialer +} + +const socks5Version = 5 + +const ( + socks5AuthNone = 0 + socks5AuthPassword = 2 +) + +const socks5Connect = 1 + +const ( + socks5IP4 = 1 + socks5Domain = 3 + socks5IP6 = 4 +) + +var socks5Errors = []string{ + "", + "general failure", + "connection forbidden", + "network unreachable", + "host unreachable", + "connection refused", + "TTL expired", + "command not supported", + "address type not supported", +} + +// Dial connects to the address addr on the network net via the SOCKS5 proxy. +func (s *socks5) Dial(network, addr string) (net.Conn, error) { + switch network { + case "tcp", "tcp6", "tcp4": + default: + return nil, errors.New("proxy: no support for SOCKS5 proxy connections of type " + network) + } + + conn, err := s.forward.Dial(s.network, s.addr) + if err != nil { + return nil, err + } + closeConn := &conn + defer func() { + if closeConn != nil { + (*closeConn).Close() + } + }() + + host, portStr, err := net.SplitHostPort(addr) + if err != nil { + return nil, err + } + + port, err := strconv.Atoi(portStr) + if err != nil { + return nil, errors.New("proxy: failed to parse port number: " + portStr) + } + if port < 1 || port > 0xffff { + return nil, errors.New("proxy: port number out of range: " + portStr) + } + + // the size here is just an estimate + buf := make([]byte, 0, 6+len(host)) + + buf = append(buf, socks5Version) + if len(s.user) > 0 && len(s.user) < 256 && len(s.password) < 256 { + buf = append(buf, 2 /* num auth methods */, socks5AuthNone, socks5AuthPassword) + } else { + buf = append(buf, 1 /* num auth methods */, socks5AuthNone) + } + + if _, err := conn.Write(buf); err != nil { + return nil, 
errors.New("proxy: failed to write greeting to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read greeting from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + if buf[0] != 5 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " has unexpected version " + strconv.Itoa(int(buf[0]))) + } + if buf[1] == 0xff { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " requires authentication") + } + + if buf[1] == socks5AuthPassword { + buf = buf[:0] + buf = append(buf, 1 /* password protocol version */) + buf = append(buf, uint8(len(s.user))) + buf = append(buf, s.user...) + buf = append(buf, uint8(len(s.password))) + buf = append(buf, s.password...) + + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write authentication request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read authentication reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if buf[1] != 0 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " rejected username/password") + } + } + + buf = buf[:0] + buf = append(buf, socks5Version, socks5Connect, 0 /* reserved */) + + if ip := net.ParseIP(host); ip != nil { + if ip4 := ip.To4(); ip4 != nil { + buf = append(buf, socks5IP4) + ip = ip4 + } else { + buf = append(buf, socks5IP6) + } + buf = append(buf, ip...) + } else { + if len(host) > 255 { + return nil, errors.New("proxy: destination hostname too long: " + host) + } + buf = append(buf, socks5Domain) + buf = append(buf, byte(len(host))) + buf = append(buf, host...) 
+ } + buf = append(buf, byte(port>>8), byte(port)) + + if _, err := conn.Write(buf); err != nil { + return nil, errors.New("proxy: failed to write connect request to SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + if _, err := io.ReadFull(conn, buf[:4]); err != nil { + return nil, errors.New("proxy: failed to read connect reply from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + failure := "unknown error" + if int(buf[1]) < len(socks5Errors) { + failure = socks5Errors[buf[1]] + } + + if len(failure) > 0 { + return nil, errors.New("proxy: SOCKS5 proxy at " + s.addr + " failed to connect: " + failure) + } + + bytesToDiscard := 0 + switch buf[3] { + case socks5IP4: + bytesToDiscard = net.IPv4len + case socks5IP6: + bytesToDiscard = net.IPv6len + case socks5Domain: + _, err := io.ReadFull(conn, buf[:1]) + if err != nil { + return nil, errors.New("proxy: failed to read domain length from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + bytesToDiscard = int(buf[0]) + default: + return nil, errors.New("proxy: got unknown address type " + strconv.Itoa(int(buf[3])) + " from SOCKS5 proxy at " + s.addr) + } + + if cap(buf) < bytesToDiscard { + buf = make([]byte, bytesToDiscard) + } else { + buf = buf[:bytesToDiscard] + } + if _, err := io.ReadFull(conn, buf); err != nil { + return nil, errors.New("proxy: failed to read address from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + // Also need to discard the port number + if _, err := io.ReadFull(conn, buf[:2]); err != nil { + return nil, errors.New("proxy: failed to read port from SOCKS5 proxy at " + s.addr + ": " + err.Error()) + } + + closeConn = nil + return conn, nil +} diff --git a/vendor/gopkg.in/fatih/pool.v2/channel.go b/vendor/gopkg.in/fatih/pool.v2/channel.go new file mode 100644 index 0000000..6218fe1 --- /dev/null +++ b/vendor/gopkg.in/fatih/pool.v2/channel.go @@ -0,0 +1,131 @@ +package pool + +import ( + "errors" + "fmt" + "net" + "sync" +) + +// channelPool implements the Pool 
interface based on buffered channels. +type channelPool struct { + // storage for our net.Conn connections + mu sync.Mutex + conns chan net.Conn + + // net.Conn generator + factory Factory +} + +// Factory is a function to create new connections. +type Factory func() (net.Conn, error) + +// NewChannelPool returns a new pool based on buffered channels with an initial +// capacity and maximum capacity. Factory is used when initial capacity is +// greater than zero to fill the pool. A zero initialCap doesn't fill the Pool +// until a new Get() is called. During a Get(), If there is no new connection +// available in the pool, a new connection will be created via the Factory() +// method. +func NewChannelPool(initialCap, maxCap int, factory Factory) (Pool, error) { + if initialCap < 0 || maxCap <= 0 || initialCap > maxCap { + return nil, errors.New("invalid capacity settings") + } + + c := &channelPool{ + conns: make(chan net.Conn, maxCap), + factory: factory, + } + + // create initial connections, if something goes wrong, + // just close the pool error out. + for i := 0; i < initialCap; i++ { + conn, err := factory() + if err != nil { + c.Close() + return nil, fmt.Errorf("factory is not able to fill the pool: %s", err) + } + c.conns <- conn + } + + return c, nil +} + +func (c *channelPool) getConns() chan net.Conn { + c.mu.Lock() + conns := c.conns + c.mu.Unlock() + return conns +} + +// Get implements the Pool interfaces Get() method. If there is no new +// connection available in the pool, a new connection will be created via the +// Factory() method. +func (c *channelPool) Get() (net.Conn, error) { + conns := c.getConns() + if conns == nil { + return nil, ErrClosed + } + + // wrap our connections with out custom net.Conn implementation (wrapConn + // method) that puts the connection back to the pool if it's closed. 
+ select { + case conn := <-conns: + if conn == nil { + return nil, ErrClosed + } + + return c.wrapConn(conn), nil + default: + conn, err := c.factory() + if err != nil { + return nil, err + } + + return c.wrapConn(conn), nil + } +} + +// put puts the connection back to the pool. If the pool is full or closed, +// conn is simply closed. A nil conn will be rejected. +func (c *channelPool) put(conn net.Conn) error { + if conn == nil { + return errors.New("connection is nil. rejecting") + } + + c.mu.Lock() + defer c.mu.Unlock() + + if c.conns == nil { + // pool is closed, close passed connection + return conn.Close() + } + + // put the resource back into the pool. If the pool is full, this will + // block and the default case will be executed. + select { + case c.conns <- conn: + return nil + default: + // pool is full, close passed connection + return conn.Close() + } +} + +func (c *channelPool) Close() { + c.mu.Lock() + conns := c.conns + c.conns = nil + c.factory = nil + c.mu.Unlock() + + if conns == nil { + return + } + + close(conns) + for conn := range conns { + conn.Close() + } +} + +func (c *channelPool) Len() int { return len(c.getConns()) } diff --git a/vendor/gopkg.in/fatih/pool.v2/conn.go b/vendor/gopkg.in/fatih/pool.v2/conn.go new file mode 100644 index 0000000..693488c --- /dev/null +++ b/vendor/gopkg.in/fatih/pool.v2/conn.go @@ -0,0 +1,43 @@ +package pool + +import ( + "net" + "sync" +) + +// PoolConn is a wrapper around net.Conn to modify the the behavior of +// net.Conn's Close() method. +type PoolConn struct { + net.Conn + mu sync.RWMutex + c *channelPool + unusable bool +} + +// Close() puts the given connects back to the pool instead of closing it. 
+func (p *PoolConn) Close() error { + p.mu.RLock() + defer p.mu.RUnlock() + + if p.unusable { + if p.Conn != nil { + return p.Conn.Close() + } + return nil + } + return p.c.put(p.Conn) +} + +// MarkUnusable() marks the connection not usable any more, to let the pool close it instead of returning it to pool. +func (p *PoolConn) MarkUnusable() { + p.mu.Lock() + p.unusable = true + p.mu.Unlock() +} + +// newConn wraps a standard net.Conn to a poolConn net.Conn. +func (c *channelPool) wrapConn(conn net.Conn) net.Conn { + p := &PoolConn{c: c} + p.Conn = conn + return p +} diff --git a/vendor/gopkg.in/fatih/pool.v2/pool.go b/vendor/gopkg.in/fatih/pool.v2/pool.go new file mode 100644 index 0000000..f88f2ac --- /dev/null +++ b/vendor/gopkg.in/fatih/pool.v2/pool.go @@ -0,0 +1,28 @@ +// Package pool implements a pool of net.Conn interfaces to manage and reuse them. +package pool + +import ( + "errors" + "net" +) + +var ( + // ErrClosed is the error resulting if the pool is closed via pool.Close(). + ErrClosed = errors.New("pool is closed") +) + +// Pool interface describes a pool implementation. A pool should have maximum +// capacity. An ideal pool is threadsafe and easy to use. +type Pool interface { + // Get returns a new connection from the pool. Closing the connections puts + // it back to the Pool. Closing it when the pool is destroyed or full will + // be counted as an error. + Get() (net.Conn, error) + + // Close closes the pool and all its connections. After Close() the pool is + // no longer usable. + Close() + + // Len returns the current number of connections of the pool. 
+ Len() int +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/cache.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/cache.go new file mode 100644 index 0000000..50ec0c1 --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/cache.go @@ -0,0 +1,283 @@ +// This code is based on encoding/json and gorilla/schema + +package encoding + +import ( + "reflect" + "sort" + "sync" + "time" +) + +// A field represents a single field found in a struct. +type field struct { + name string + nameBytes []byte // []byte(name) + equalFold func(s, t []byte) bool + + tag bool + index []int + typ reflect.Type + omitEmpty bool + quoted bool + reference bool + refName string + compound bool + compoundIndex int +} + +func fillField(f field) field { + f.nameBytes = []byte(f.name) + f.equalFold = foldFunc(f.nameBytes) + + return f +} + +// byName sorts field by name, breaking ties with depth, +// then breaking ties with "name came from tag", then +// breaking ties with index sequence. +type byName []field + +func (x byName) Len() int { return len(x) } + +func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byName) Less(i, j int) bool { + if x[i].name != x[j].name { + return x[i].name < x[j].name + } + if len(x[i].index) != len(x[j].index) { + return len(x[i].index) < len(x[j].index) + } + if x[i].tag != x[j].tag { + return x[i].tag + } + return byIndex(x).Less(i, j) +} + +// byIndex sorts field by index sequence. +type byIndex []field + +func (x byIndex) Len() int { return len(x) } + +func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x byIndex) Less(i, j int) bool { + for k, xik := range x[i].index { + if k >= len(x[j].index) { + return false + } + if xik != x[j].index[k] { + return xik < x[j].index[k] + } + } + return len(x[i].index) < len(x[j].index) +} + +// typeFields returns a list of fields that should be recognized for the given type. 
+// The algorithm is breadth-first search over the set of structs to include - the top struct +// and then any reachable anonymous structs. +func typeFields(t reflect.Type) []field { + // Anonymous fields to explore at the current level and the next. + current := []field{} + next := []field{{typ: t}} + + // Count of queued names for current level and the next. + count := map[reflect.Type]int{} + nextCount := map[reflect.Type]int{} + + // Types already visited at an earlier level. + visited := map[reflect.Type]bool{} + + // Fields found. + var fields []field + + for len(next) > 0 { + current, next = next, current[:0] + count, nextCount = nextCount, map[reflect.Type]int{} + + for _, f := range current { + if visited[f.typ] { + continue + } + visited[f.typ] = true + + // Scan f.typ for fields to include. + for i := 0; i < f.typ.NumField(); i++ { + sf := f.typ.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + // Extract field name from tag + tag := getTag(sf) + if tag == "-" { + continue + } + name, opts := parseTag(tag) + name, compoundIndex, isCompound := parseCompoundIndex(name) + if !isValidTag(name) { + name = "" + } + // Extract referenced field from tags + refTag := getRefTag(sf) + ref, _ := parseTag(refTag) + if !isValidTag(ref) { + ref = "" + } + + index := make([]int, len(f.index)+1) + copy(index, f.index) + index[len(f.index)] = i + + ft := sf.Type + if ft.Name() == "" && ft.Kind() == reflect.Ptr { + // Follow pointer. + ft = ft.Elem() + } + + // Record found field and index sequence. 
+ if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct || isPseudoType(ft) { + tagged := name != "" + if name == "" { + name = sf.Name + } + fields = append(fields, fillField(field{ + name: name, + tag: tagged, + index: index, + typ: ft, + omitEmpty: opts.Contains("omitempty"), + reference: opts.Contains("reference"), + refName: ref, + compound: isCompound, + compoundIndex: compoundIndex, + })) + if count[f.typ] > 1 { + // If there were multiple instances, add a second, + // so that the annihilation code will see a duplicate. + // It only cares about the distinction between 1 or 2, + // so don't bother generating any more copies. + fields = append(fields, fields[len(fields)-1]) + } + continue + } + + // Record new anonymous struct to explore in next round. + nextCount[ft]++ + if nextCount[ft] == 1 { + next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft})) + } + } + } + } + + sort.Sort(byName(fields)) + + // Delete all fields that are hidden by the Go rules for embedded fields, + // except that fields with valid tags are promoted. + + // The fields are sorted in primary order of name, secondary order + // of field index length. Loop over names; for each name, delete + // hidden fields by choosing the one dominant field that survives. + out := fields[:0] + for advance, i := 0, 0; i < len(fields); i += advance { + // One iteration per name. + // Find the sequence of fields with the name of this first field. 
+ fi := fields[i] + for advance = 1; i+advance < len(fields); advance++ { + fj := fields[i+advance] + if fj.name != fi.name { + break + } + if fi.compound && fj.compound && fi.compoundIndex != fj.compoundIndex { + break + } + + } + if advance == 1 { // Only one field with this name + out = append(out, fi) + continue + } + dominant, ok := dominantField(fields[i : i+advance]) + if ok { + out = append(out, dominant) + } + } + + fields = out + sort.Sort(byIndex(fields)) + + return fields +} + +func isPseudoType(t reflect.Type) bool { + return t == reflect.TypeOf(time.Time{}) +} + +// dominantField looks through the fields, all of which are known to +// have the same name, to find the single field that dominates the +// others using Go's embedding rules, modified by the presence of +// valid tags. If there are multiple top-level fields, the boolean +// will be false: This condition is an error in Go and we skip all +// the fields. +func dominantField(fields []field) (field, bool) { + // The fields are sorted in increasing index-length order. The winner + // must therefore be one with the shortest index length. Drop all + // longer entries, which is easy: just truncate the slice. + length := len(fields[0].index) + tagged := -1 // Index of first tagged field. + for i, f := range fields { + if len(f.index) > length { + fields = fields[:i] + break + } + if f.tag { + if tagged >= 0 { + // Multiple tagged fields at the same level: conflict. + // Return no field. + return field{}, false + } + tagged = i + } + } + if tagged >= 0 { + return fields[tagged], true + } + // All remaining fields have the same length. If there's more than one, + // we have a conflict (two fields named "X" at the same level) and we + // return no field. + if len(fields) > 1 { + return field{}, false + } + return fields[0], true +} + +var fieldCache struct { + sync.RWMutex + m map[reflect.Type][]field +} + +// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. 
+func cachedTypeFields(t reflect.Type) []field { + fieldCache.RLock() + f := fieldCache.m[t] + fieldCache.RUnlock() + if f != nil { + return f + } + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = typeFields(t) + if f == nil { + f = []field{} + } + + fieldCache.Lock() + if fieldCache.m == nil { + fieldCache.m = map[reflect.Type][]field{} + } + fieldCache.m[t] = f + fieldCache.Unlock() + return f +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/decoder.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/decoder.go new file mode 100644 index 0000000..e59ca04 --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/decoder.go @@ -0,0 +1,152 @@ +package encoding + +import ( + "errors" + "reflect" + "runtime" + "sync" +) + +var byteSliceType = reflect.TypeOf([]byte(nil)) + +type decoderFunc func(dv reflect.Value, sv reflect.Value) + +// Decode decodes map[string]interface{} into a struct. The first parameter +// must be a pointer. 
+func Decode(dst interface{}, src interface{}) (err error) { + return decode(dst, src, true) +} + +func Merge(dst interface{}, src interface{}) (err error) { + return decode(dst, src, false) +} + +func decode(dst interface{}, src interface{}, blank bool) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if v, ok := r.(string); ok { + err = errors.New(v) + } else { + err = r.(error) + } + } + }() + + dv := reflect.ValueOf(dst) + sv := reflect.ValueOf(src) + if dv.Kind() != reflect.Ptr { + return &DecodeTypeError{ + DestType: dv.Type(), + SrcType: sv.Type(), + Reason: "must be a pointer", + } + } + + dv = dv.Elem() + if !dv.CanAddr() { + return &DecodeTypeError{ + DestType: dv.Type(), + SrcType: sv.Type(), + Reason: "must be addressable", + } + } + + decodeValue(dv, sv, blank) + return nil +} + +// decodeValue decodes the source value into the destination value +func decodeValue(dv, sv reflect.Value, blank bool) { + valueDecoder(dv, sv, blank)(dv, sv) +} + +type decoderCacheKey struct { + dt, st reflect.Type + blank bool +} + +var decoderCache struct { + sync.RWMutex + m map[decoderCacheKey]decoderFunc +} + +func valueDecoder(dv, sv reflect.Value, blank bool) decoderFunc { + if !sv.IsValid() { + return invalidValueDecoder + } + + if dv.IsValid() { + dv = indirect(dv, false) + if blank { + dv.Set(reflect.Zero(dv.Type())) + } + } + + return typeDecoder(dv.Type(), sv.Type(), blank) +} + +func typeDecoder(dt, st reflect.Type, blank bool) decoderFunc { + decoderCache.RLock() + f := decoderCache.m[decoderCacheKey{dt, st, blank}] + decoderCache.RUnlock() + if f != nil { + return f + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to be ready and then calls it. This indirect + // func is only used for recursive types. 
+ decoderCache.Lock() + var wg sync.WaitGroup + wg.Add(1) + decoderCache.m[decoderCacheKey{dt, st, blank}] = func(dv, sv reflect.Value) { + wg.Wait() + f(dv, sv) + } + decoderCache.Unlock() + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. + f = newTypeDecoder(dt, st, blank) + wg.Done() + decoderCache.Lock() + decoderCache.m[decoderCacheKey{dt, st, blank}] = f + decoderCache.Unlock() + return f +} + +// indirect walks down v allocating pointers as needed, +// until it gets to a non-pointer. +func indirect(v reflect.Value, decodeNull bool) reflect.Value { + // If v is a named type and is addressable, + // start with its address, so that if the type has pointer methods, + // we find them. + if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { + v = v.Addr() + } + for { + // Load value from interface, but only if the result will be + // usefully addressable. + if v.Kind() == reflect.Interface && !v.IsNil() { + e := v.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodeNull || e.Elem().Kind() == reflect.Ptr) { + v = e + continue + } + } + + if v.Kind() != reflect.Ptr { + break + } + + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + return v +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/decoder_types.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/decoder_types.go new file mode 100644 index 0000000..bf398f0 --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/decoder_types.go @@ -0,0 +1,555 @@ +package encoding + +import ( + "bytes" + "fmt" + "reflect" + "strconv" +) + +// newTypeDecoder constructs an decoderFunc for a type. 
+func newTypeDecoder(dt, st reflect.Type, blank bool) decoderFunc { + if reflect.PtrTo(dt).Implements(unmarshalerType) || + dt.Implements(unmarshalerType) { + return unmarshalerDecoder + } + + if st.Kind() == reflect.Interface { + return newInterfaceAsTypeDecoder(blank) + } + + switch dt.Kind() { + case reflect.Bool: + switch st.Kind() { + case reflect.Bool: + return boolAsBoolDecoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intAsBoolDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsBoolDecoder + case reflect.Float32, reflect.Float64: + return floatAsBoolDecoder + case reflect.String: + return stringAsBoolDecoder + default: + return decodeTypeError + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + switch st.Kind() { + case reflect.Bool: + return boolAsIntDecoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intAsIntDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsIntDecoder + case reflect.Float32, reflect.Float64: + return floatAsIntDecoder + case reflect.String: + return stringAsIntDecoder + default: + return decodeTypeError + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch st.Kind() { + case reflect.Bool: + return boolAsUintDecoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intAsUintDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsUintDecoder + case reflect.Float32, reflect.Float64: + return floatAsUintDecoder + case reflect.String: + return stringAsUintDecoder + default: + return decodeTypeError + } + case reflect.Float32, reflect.Float64: + switch st.Kind() { + case reflect.Bool: + return boolAsFloatDecoder + 
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intAsFloatDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsFloatDecoder + case reflect.Float32, reflect.Float64: + return floatAsFloatDecoder + case reflect.String: + return stringAsFloatDecoder + default: + return decodeTypeError + } + case reflect.String: + switch st.Kind() { + case reflect.Bool: + return boolAsStringDecoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intAsStringDecoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintAsStringDecoder + case reflect.Float32, reflect.Float64: + return floatAsStringDecoder + case reflect.String: + return stringAsStringDecoder + default: + return decodeTypeError + } + case reflect.Interface: + if !st.AssignableTo(dt) { + return decodeTypeError + } + + return interfaceDecoder + case reflect.Ptr: + return newPtrDecoder(dt, st, blank) + case reflect.Map: + if st.AssignableTo(dt) { + return interfaceDecoder + } + + switch st.Kind() { + case reflect.Map: + return newMapAsMapDecoder(dt, st, blank) + default: + return decodeTypeError + } + case reflect.Struct: + if st.AssignableTo(dt) { + return interfaceDecoder + } + + switch st.Kind() { + case reflect.Map: + if kind := st.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return newDecodeTypeError(fmt.Errorf("map needs string keys")) + } + + return newMapAsStructDecoder(dt, st, blank) + default: + return decodeTypeError + } + case reflect.Slice: + if st.AssignableTo(dt) { + return interfaceDecoder + } + + switch st.Kind() { + case reflect.Array, reflect.Slice: + return newSliceDecoder(dt, st) + default: + return decodeTypeError + } + case reflect.Array: + if st.AssignableTo(dt) { + return interfaceDecoder + } + + switch st.Kind() { + case reflect.Array, reflect.Slice: + return 
newArrayDecoder(dt, st) + default: + return decodeTypeError + } + default: + return unsupportedTypeDecoder + } +} + +func invalidValueDecoder(dv, sv reflect.Value) { + dv.Set(reflect.Zero(dv.Type())) +} + +func unsupportedTypeDecoder(dv, sv reflect.Value) { + panic(&UnsupportedTypeError{dv.Type()}) +} + +func decodeTypeError(dv, sv reflect.Value) { + panic(&DecodeTypeError{ + DestType: dv.Type(), + SrcType: sv.Type(), + }) +} + +func newDecodeTypeError(err error) decoderFunc { + return func(dv, sv reflect.Value) { + panic(&DecodeTypeError{ + DestType: dv.Type(), + SrcType: sv.Type(), + Reason: err.Error(), + }) + } +} + +func interfaceDecoder(dv, sv reflect.Value) { + dv.Set(sv) +} + +func newInterfaceAsTypeDecoder(blank bool) decoderFunc { + return func(dv, sv reflect.Value) { + if !sv.IsNil() { + dv = indirect(dv, false) + if blank { + dv.Set(reflect.Zero(dv.Type())) + } + decodeValue(dv, sv.Elem(), blank) + } + } +} + +type ptrDecoder struct { + elemDec decoderFunc +} + +func (d *ptrDecoder) decode(dv, sv reflect.Value) { + v := reflect.New(dv.Type().Elem()) + d.elemDec(v, sv) + dv.Set(v) +} + +func newPtrDecoder(dt, st reflect.Type, blank bool) decoderFunc { + dec := &ptrDecoder{typeDecoder(dt.Elem(), st, blank)} + + return dec.decode +} + +func unmarshalerDecoder(dv, sv reflect.Value) { + // modeled off of https://golang.org/src/encoding/json/decode.go?#L325 + if dv.Kind() != reflect.Ptr && dv.Type().Name() != "" && dv.CanAddr() { + dv = dv.Addr() + } + + if dv.IsNil() { + dv.Set(reflect.New(dv.Type().Elem())) + } + + u := dv.Interface().(Unmarshaler) + err := u.UnmarshalRQL(sv.Interface()) + if err != nil { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} + +// Boolean decoders + +func boolAsBoolDecoder(dv, sv reflect.Value) { + dv.SetBool(sv.Bool()) +} +func boolAsIntDecoder(dv, sv reflect.Value) { + if sv.Bool() { + dv.SetInt(1) + } else { + dv.SetInt(0) + } +} +func boolAsUintDecoder(dv, sv reflect.Value) { + if sv.Bool() { + 
dv.SetUint(1) + } else { + dv.SetUint(0) + } +} +func boolAsFloatDecoder(dv, sv reflect.Value) { + if sv.Bool() { + dv.SetFloat(1) + } else { + dv.SetFloat(0) + } +} +func boolAsStringDecoder(dv, sv reflect.Value) { + if sv.Bool() { + dv.SetString("1") + } else { + dv.SetString("0") + } +} + +// Int decoders + +func intAsBoolDecoder(dv, sv reflect.Value) { + dv.SetBool(sv.Int() != 0) +} +func intAsIntDecoder(dv, sv reflect.Value) { + dv.SetInt(sv.Int()) +} +func intAsUintDecoder(dv, sv reflect.Value) { + dv.SetUint(uint64(sv.Int())) +} +func intAsFloatDecoder(dv, sv reflect.Value) { + dv.SetFloat(float64(sv.Int())) +} +func intAsStringDecoder(dv, sv reflect.Value) { + dv.SetString(strconv.FormatInt(sv.Int(), 10)) +} + +// Uint decoders + +func uintAsBoolDecoder(dv, sv reflect.Value) { + dv.SetBool(sv.Uint() != 0) +} +func uintAsIntDecoder(dv, sv reflect.Value) { + dv.SetInt(int64(sv.Uint())) +} +func uintAsUintDecoder(dv, sv reflect.Value) { + dv.SetUint(sv.Uint()) +} +func uintAsFloatDecoder(dv, sv reflect.Value) { + dv.SetFloat(float64(sv.Uint())) +} +func uintAsStringDecoder(dv, sv reflect.Value) { + dv.SetString(strconv.FormatUint(sv.Uint(), 10)) +} + +// Float decoders + +func floatAsBoolDecoder(dv, sv reflect.Value) { + dv.SetBool(sv.Float() != 0) +} +func floatAsIntDecoder(dv, sv reflect.Value) { + dv.SetInt(int64(sv.Float())) +} +func floatAsUintDecoder(dv, sv reflect.Value) { + dv.SetUint(uint64(sv.Float())) +} +func floatAsFloatDecoder(dv, sv reflect.Value) { + dv.SetFloat(float64(sv.Float())) +} +func floatAsStringDecoder(dv, sv reflect.Value) { + dv.SetString(strconv.FormatFloat(sv.Float(), 'f', -1, 64)) +} + +// String decoders + +func stringAsBoolDecoder(dv, sv reflect.Value) { + b, err := strconv.ParseBool(sv.String()) + if err == nil { + dv.SetBool(b) + } else if sv.String() == "" { + dv.SetBool(false) + } else { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} +func stringAsIntDecoder(dv, sv reflect.Value) { + i, err := 
strconv.ParseInt(sv.String(), 0, dv.Type().Bits()) + if err == nil { + dv.SetInt(i) + } else { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} +func stringAsUintDecoder(dv, sv reflect.Value) { + i, err := strconv.ParseUint(sv.String(), 0, dv.Type().Bits()) + if err == nil { + dv.SetUint(i) + } else { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} +func stringAsFloatDecoder(dv, sv reflect.Value) { + f, err := strconv.ParseFloat(sv.String(), dv.Type().Bits()) + if err == nil { + dv.SetFloat(f) + } else { + panic(&DecodeTypeError{dv.Type(), sv.Type(), err.Error()}) + } +} +func stringAsStringDecoder(dv, sv reflect.Value) { + dv.SetString(sv.String()) +} + +// Slice/Array decoder + +type sliceDecoder struct { + arrayDec decoderFunc +} + +func (d *sliceDecoder) decode(dv, sv reflect.Value) { + if dv.Kind() == reflect.Slice { + dv.Set(reflect.MakeSlice(dv.Type(), dv.Len(), dv.Cap())) + } + + if !sv.IsNil() { + d.arrayDec(dv, sv) + } +} + +func newSliceDecoder(dt, st reflect.Type) decoderFunc { + dec := &sliceDecoder{newArrayDecoder(dt, st)} + return dec.decode +} + +type arrayDecoder struct { + elemDec decoderFunc +} + +func (d *arrayDecoder) decode(dv, sv reflect.Value) { + // Iterate through the slice/array and decode each element before adding it + // to the dest slice/array + i := 0 + for i < sv.Len() { + if dv.Kind() == reflect.Slice { + // Get element of array, growing if necessary. + if i >= dv.Cap() { + newcap := dv.Cap() + dv.Cap()/2 + if newcap < 4 { + newcap = 4 + } + newdv := reflect.MakeSlice(dv.Type(), dv.Len(), newcap) + reflect.Copy(newdv, dv) + dv.Set(newdv) + } + if i >= dv.Len() { + dv.SetLen(i + 1) + } + } + + if i < dv.Len() { + // Decode into element. + d.elemDec(dv.Index(i), sv.Index(i)) + } + + i++ + } + + // Ensure that the destination is the correct size + if i < dv.Len() { + if dv.Kind() == reflect.Array { + // Array. Zero the rest. 
+ z := reflect.Zero(dv.Type().Elem()) + for ; i < dv.Len(); i++ { + dv.Index(i).Set(z) + } + } else { + dv.SetLen(i) + } + } +} + +func newArrayDecoder(dt, st reflect.Type) decoderFunc { + dec := &arrayDecoder{typeDecoder(dt.Elem(), st.Elem(), true)} + return dec.decode +} + +// Map decoder + +type mapAsMapDecoder struct { + keyDec, elemDec decoderFunc + blank bool +} + +func (d *mapAsMapDecoder) decode(dv, sv reflect.Value) { + dt := dv.Type() + if d.blank { + dv.Set(reflect.MakeMap(reflect.MapOf(dt.Key(), dt.Elem()))) + } + + var mapKey reflect.Value + var mapElem reflect.Value + + keyType := dv.Type().Key() + elemType := dv.Type().Elem() + + for _, sElemKey := range sv.MapKeys() { + var dElemKey reflect.Value + var dElemVal reflect.Value + + if !mapKey.IsValid() { + mapKey = reflect.New(keyType).Elem() + } else { + mapKey.Set(reflect.Zero(keyType)) + } + dElemKey = mapKey + + if !mapElem.IsValid() { + mapElem = reflect.New(elemType).Elem() + } else { + mapElem.Set(reflect.Zero(elemType)) + } + dElemVal = mapElem + + d.keyDec(dElemKey, sElemKey) + d.elemDec(dElemVal, sv.MapIndex(sElemKey)) + + dv.SetMapIndex(dElemKey, dElemVal) + } +} + +func newMapAsMapDecoder(dt, st reflect.Type, blank bool) decoderFunc { + d := &mapAsMapDecoder{typeDecoder(dt.Key(), st.Key(), blank), typeDecoder(dt.Elem(), st.Elem(), blank), blank} + return d.decode +} + +type mapAsStructDecoder struct { + fields []field + fieldDecs []decoderFunc + blank bool +} + +func (d *mapAsStructDecoder) decode(dv, sv reflect.Value) { + for _, kv := range sv.MapKeys() { + var f *field + var compoundFields = []*field{} + var fieldDec decoderFunc + key := []byte(kv.String()) + for i := range d.fields { + ff := &d.fields[i] + ffd := d.fieldDecs[i] + + if bytes.Equal(ff.nameBytes, key) { + f = ff + fieldDec = ffd + if ff.compound { + compoundFields = append(compoundFields, ff) + } + } + if f == nil && ff.equalFold(ff.nameBytes, key) { + f = ff + fieldDec = ffd + if ff.compound { + compoundFields = 
append(compoundFields, ff) + } + } + } + + if len(compoundFields) > 0 { + for _, compoundField := range compoundFields { + dElemVal := fieldByIndex(dv, compoundField.index) + sElemVal := sv.MapIndex(kv) + + if sElemVal.Kind() == reflect.Interface { + sElemVal = sElemVal.Elem() + } + sElemVal = sElemVal.Index(compoundField.compoundIndex) + fieldDec = typeDecoder(dElemVal.Type(), sElemVal.Type(), d.blank) + + if !sElemVal.IsValid() || !dElemVal.CanSet() { + continue + } + + fieldDec(dElemVal, sElemVal) + } + } else if f != nil { + dElemVal := fieldByIndex(dv, f.index) + sElemVal := sv.MapIndex(kv) + + if !sElemVal.IsValid() || !dElemVal.CanSet() { + continue + } + + fieldDec(dElemVal, sElemVal) + } + } +} + +func newMapAsStructDecoder(dt, st reflect.Type, blank bool) decoderFunc { + fields := cachedTypeFields(dt) + se := &mapAsStructDecoder{ + fields: fields, + fieldDecs: make([]decoderFunc, len(fields)), + blank: blank, + } + for i, f := range fields { + se.fieldDecs[i] = typeDecoder(typeByIndex(dt, f.index), st.Elem(), blank) + } + return se.decode +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoder.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoder.go new file mode 100644 index 0000000..3b0d350 --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoder.go @@ -0,0 +1,89 @@ +// This code is based on encoding/json and gorilla/schema + +package encoding + +import ( + "errors" + "reflect" + "runtime" + "sync" +) + +type encoderFunc func(v reflect.Value) interface{} + +// Encode returns the encoded value of v. +// +// Encode traverses the value v recursively and looks for structs. 
If a struct +// is found then it is checked for tagged fields and convert to +// map[string]interface{} +func Encode(v interface{}) (ev interface{}, err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + if v, ok := r.(string); ok { + err = errors.New(v) + } else { + err = r.(error) + } + } + }() + + return encode(reflect.ValueOf(v)), nil +} + +func encode(v reflect.Value) interface{} { + return valueEncoder(v)(v) +} + +var encoderCache struct { + sync.RWMutex + m map[reflect.Type]encoderFunc +} + +func valueEncoder(v reflect.Value) encoderFunc { + if !v.IsValid() { + return invalidValueEncoder + } + return typeEncoder(v.Type()) +} + +func typeEncoder(t reflect.Type) encoderFunc { + encoderCache.RLock() + f := encoderCache.m[t] + encoderCache.RUnlock() + if f != nil { + return f + } + + // To deal with recursive types, populate the map with an + // indirect func before we build it. This type waits on the + // real func (f) to + // be ready and then calls it. This indirect + // func is only used for recursive types. + encoderCache.Lock() + var wg sync.WaitGroup + wg.Add(1) + encoderCache.m[t] = func(v reflect.Value) interface{} { + wg.Wait() + return f(v) + } + encoderCache.Unlock() + + // Compute fields without lock. + // Might duplicate effort but won't hold other computations back. 
+ f = newTypeEncoder(t, true) + wg.Done() + encoderCache.Lock() + encoderCache.m[t] = f + encoderCache.Unlock() + return f +} + +// IgnoreType causes the encoder to ignore a type when encoding +func IgnoreType(t reflect.Type) { + encoderCache.Lock() + encoderCache.m[t] = doNothingEncoder + encoderCache.Unlock() +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoder_types.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoder_types.go new file mode 100644 index 0000000..f57773b --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoder_types.go @@ -0,0 +1,410 @@ +package encoding + +import ( + "encoding/base64" + "fmt" + "math" + "reflect" + "time" +) + +// newTypeEncoder constructs an encoderFunc for a type. +// The returned encoder only checks CanAddr when allowAddr is true. +func newTypeEncoder(t reflect.Type, allowAddr bool) encoderFunc { + if t.Implements(marshalerType) { + return marshalerEncoder + } + if t.Kind() != reflect.Ptr && allowAddr { + if reflect.PtrTo(t).Implements(marshalerType) { + return newCondAddrEncoder(addrMarshalerEncoder, newTypeEncoder(t, false)) + } + } + + // Check for psuedo-types first + switch t { + case timeType: + return timePseudoTypeEncoder + } + + switch t.Kind() { + case reflect.Bool: + return boolEncoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return intEncoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return uintEncoder + case reflect.Float32, reflect.Float64: + return floatEncoder + case reflect.String: + return stringEncoder + case reflect.Interface: + return interfaceEncoder + case reflect.Struct: + return newStructEncoder(t) + case reflect.Map: + return newMapEncoder(t) + case reflect.Slice: + return newSliceEncoder(t) + case reflect.Array: + return newArrayEncoder(t) + case reflect.Ptr: + return newPtrEncoder(t) + case reflect.Func: + // functions are a special case as they can be 
used internally for + // optional arguments. Just return the raw function, if somebody tries + // to pass a function to the database the JSON marshaller will catch this + // anyway. + return funcEncoder + default: + return unsupportedTypeEncoder + } +} + +func invalidValueEncoder(v reflect.Value) interface{} { + return nil +} + +func doNothingEncoder(v reflect.Value) interface{} { + return v.Interface() +} + +func marshalerEncoder(v reflect.Value) interface{} { + if v.Kind() == reflect.Ptr && v.IsNil() { + return nil + } + m := v.Interface().(Marshaler) + ev, err := m.MarshalRQL() + if err != nil { + panic(&MarshalerError{v.Type(), err}) + } + + return ev +} + +func addrMarshalerEncoder(v reflect.Value) interface{} { + va := v.Addr() + if va.IsNil() { + return nil + } + m := va.Interface().(Marshaler) + ev, err := m.MarshalRQL() + if err != nil { + panic(&MarshalerError{v.Type(), err}) + } + + return ev +} + +func boolEncoder(v reflect.Value) interface{} { + if v.Bool() { + return true + } else { + return false + } +} + +func intEncoder(v reflect.Value) interface{} { + return v.Int() +} + +func uintEncoder(v reflect.Value) interface{} { + return v.Uint() +} + +func floatEncoder(v reflect.Value) interface{} { + return v.Float() +} + +func stringEncoder(v reflect.Value) interface{} { + return v.String() +} + +func interfaceEncoder(v reflect.Value) interface{} { + if v.IsNil() { + return nil + } + return encode(v.Elem()) +} + +func funcEncoder(v reflect.Value) interface{} { + if v.IsNil() { + return nil + } + return v.Interface() +} + +func asStringEncoder(v reflect.Value) interface{} { + return fmt.Sprintf("%v", v.Interface()) +} + +func unsupportedTypeEncoder(v reflect.Value) interface{} { + panic(&UnsupportedTypeError{v.Type()}) +} + +type structEncoder struct { + fields []field + fieldEncs []encoderFunc +} + +func (se *structEncoder) encode(v reflect.Value) interface{} { + m := make(map[string]interface{}) + for i, f := range se.fields { + fv := fieldByIndex(v, 
f.index) + if !fv.IsValid() || f.omitEmpty && se.isEmptyValue(fv) { + continue + } + + encField := se.fieldEncs[i](fv) + + // If this field is a referenced field then attempt to extract the value. + if f.reference { + // Override the encoded field with the referenced field + encField = getReferenceField(f, v, encField) + } + + if f.compound { + compoundField, ok := m[f.name].([]interface{}) + if !ok { + compoundField = make([]interface{}, f.compoundIndex+1) + } else if len(compoundField) < f.compoundIndex+1 { + tmp := make([]interface{}, f.compoundIndex+1) + copy(tmp, compoundField) + compoundField = tmp + } + + compoundField[f.compoundIndex] = encField + encField = compoundField + } + + m[f.name] = encField + } + + return m +} + +func getReferenceField(f field, v reflect.Value, encField interface{}) interface{} { + refName := f.name + if f.refName != "" { + refName = f.refName + } + + encFields, isArray := encField.([]interface{}) + if isArray { + refVals := make([]interface{}, len(encFields)) + for i, e := range encFields { + refVals[i] = extractValue(e, v, f.name, refName) + } + return refVals + } + refVal := extractValue(encField, v, f.name, refName) + return refVal +} + +func extractValue(encField interface{}, v reflect.Value, name string, refName string) interface{} { + // referenced fields can only handle maps so return an error if the + // encoded field is of a different type + m, ok := encField.(map[string]interface{}) + if !ok { + err := fmt.Errorf("Error refing field %s in %s, expected object but got %t", refName, name, encField) + panic(&MarshalerError{v.Type(), err}) + } + refVal, ok := m[refName] + if !ok { + err := fmt.Errorf("Error refing field %s in %s, could not find referenced field", refName, name) + panic(&MarshalerError{v.Type(), err}) + } + return refVal +} + +func (se *structEncoder) isEmptyValue(v reflect.Value) bool { + if v.Type() == timeType { + return v.Interface().(time.Time) == time.Time{} + } + + return isEmptyValue(v) +} + +func 
newStructEncoder(t reflect.Type) encoderFunc { + fields := cachedTypeFields(t) + se := &structEncoder{ + fields: fields, + fieldEncs: make([]encoderFunc, len(fields)), + } + for i, f := range fields { + se.fieldEncs[i] = typeEncoder(typeByIndex(t, f.index)) + } + return se.encode +} + +type mapEncoder struct { + keyEnc, elemEnc encoderFunc +} + +func (me *mapEncoder) encode(v reflect.Value) interface{} { + if v.IsNil() { + return nil + } + + m := make(map[string]interface{}) + + for _, k := range v.MapKeys() { + m[me.keyEnc(k).(string)] = me.elemEnc(v.MapIndex(k)) + } + + return m +} + +func newMapEncoder(t reflect.Type) encoderFunc { + var keyEnc encoderFunc + switch t.Key().Kind() { + case reflect.Bool: + keyEnc = asStringEncoder + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + keyEnc = asStringEncoder + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + keyEnc = asStringEncoder + case reflect.Float32, reflect.Float64: + keyEnc = asStringEncoder + case reflect.String: + keyEnc = stringEncoder + case reflect.Interface: + keyEnc = asStringEncoder + default: + return unsupportedTypeEncoder + } + + me := &mapEncoder{keyEnc, typeEncoder(t.Elem())} + return me.encode +} + +// sliceEncoder just wraps an arrayEncoder, checking to make sure the value isn't nil. +type sliceEncoder struct { + arrayEnc encoderFunc +} + +func (se *sliceEncoder) encode(v reflect.Value) interface{} { + if v.IsNil() { + return []interface{}{} + } + return se.arrayEnc(v) +} + +func newSliceEncoder(t reflect.Type) encoderFunc { + // Byte slices get special treatment; arrays don't. 
+ if t.Elem().Kind() == reflect.Uint8 { + return encodeByteSlice + } + enc := &sliceEncoder{newArrayEncoder(t)} + return enc.encode +} + +type arrayEncoder struct { + elemEnc encoderFunc +} + +func (ae *arrayEncoder) encode(v reflect.Value) interface{} { + n := v.Len() + + a := make([]interface{}, n) + for i := 0; i < n; i++ { + a[i] = ae.elemEnc(v.Index(i)) + } + + return a +} + +func newArrayEncoder(t reflect.Type) encoderFunc { + if t.Elem().Kind() == reflect.Uint8 { + return encodeByteArray + } + enc := &arrayEncoder{typeEncoder(t.Elem())} + return enc.encode +} + +type ptrEncoder struct { + elemEnc encoderFunc +} + +func (pe *ptrEncoder) encode(v reflect.Value) interface{} { + if v.IsNil() { + return nil + } + return pe.elemEnc(v.Elem()) +} + +func newPtrEncoder(t reflect.Type) encoderFunc { + enc := &ptrEncoder{typeEncoder(t.Elem())} + return enc.encode +} + +type condAddrEncoder struct { + canAddrEnc, elseEnc encoderFunc +} + +func (ce *condAddrEncoder) encode(v reflect.Value) interface{} { + if v.CanAddr() { + return ce.canAddrEnc(v) + } else { + return ce.elseEnc(v) + } +} + +// newCondAddrEncoder returns an encoder that checks whether its value +// CanAddr and delegates to canAddrEnc if so, else to elseEnc. 
+func newCondAddrEncoder(canAddrEnc, elseEnc encoderFunc) encoderFunc { + enc := &condAddrEncoder{canAddrEnc: canAddrEnc, elseEnc: elseEnc} + return enc.encode +} + +// Pseudo-type encoders + +// Encode a time.Time value to the TIME RQL type +func timePseudoTypeEncoder(v reflect.Value) interface{} { + t := v.Interface().(time.Time) + + timeVal := float64(t.UnixNano()) / float64(time.Second) + + // use seconds-since-epoch precision if time.Time `t` + // is before the oldest nanosecond time + if t.Before(time.Unix(0, math.MinInt64)) { + timeVal = float64(t.Unix()) + } + + return map[string]interface{}{ + "$reql_type$": "TIME", + "epoch_time": timeVal, + "timezone": t.Format("-07:00"), + } +} + +// Encode a byte slice to the BINARY RQL type +func encodeByteSlice(v reflect.Value) interface{} { + var b []byte + if !v.IsNil() { + b = v.Bytes() + } + + dst := make([]byte, base64.StdEncoding.EncodedLen(len(b))) + base64.StdEncoding.Encode(dst, b) + + return map[string]interface{}{ + "$reql_type$": "BINARY", + "data": string(dst), + } +} + +// Encode a byte array to the BINARY RQL type +func encodeByteArray(v reflect.Value) interface{} { + b := make([]byte, v.Len()) + for i := 0; i < v.Len(); i++ { + b[i] = v.Index(i).Interface().(byte) + } + + dst := make([]byte, base64.StdEncoding.EncodedLen(len(b))) + base64.StdEncoding.Encode(dst, b) + + return map[string]interface{}{ + "$reql_type$": "BINARY", + "data": string(dst), + } +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoding.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoding.go new file mode 100644 index 0000000..0169e14 --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/encoding.go @@ -0,0 +1,32 @@ +package encoding + +import ( + "reflect" + "time" +) + +var ( + // type constants + stringType = reflect.TypeOf("") + timeType = reflect.TypeOf(new(time.Time)).Elem() + + marshalerType = reflect.TypeOf(new(Marshaler)).Elem() + unmarshalerType = 
reflect.TypeOf(new(Unmarshaler)).Elem() +) + +// Marshaler is the interface implemented by objects that +// can marshal themselves into a valid RQL psuedo-type. +type Marshaler interface { + MarshalRQL() (interface{}, error) +} + +// Unmarshaler is the interface implemented by objects +// that can unmarshal a psuedo-type object of themselves. +type Unmarshaler interface { + UnmarshalRQL(interface{}) error +} + +func init() { + encoderCache.m = make(map[reflect.Type]encoderFunc) + decoderCache.m = make(map[decoderCacheKey]decoderFunc) +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/errors.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/errors.go new file mode 100644 index 0000000..8b9ac2c --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/errors.go @@ -0,0 +1,102 @@ +package encoding + +import ( + "fmt" + "reflect" + "strings" +) + +type MarshalerError struct { + Type reflect.Type + Err error +} + +func (e *MarshalerError) Error() string { + return "gorethink: error calling MarshalRQL for type " + e.Type.String() + ": " + e.Err.Error() +} + +type InvalidUnmarshalError struct { + Type reflect.Type +} + +func (e *InvalidUnmarshalError) Error() string { + if e.Type == nil { + return "gorethink: UnmarshalRQL(nil)" + } + + if e.Type.Kind() != reflect.Ptr { + return "gorethink: UnmarshalRQL(non-pointer " + e.Type.String() + ")" + } + return "gorethink: UnmarshalRQL(nil " + e.Type.String() + ")" +} + +// An InvalidTypeError describes a value that was +// not appropriate for a value of a specific Go type. 
+type DecodeTypeError struct { + DestType, SrcType reflect.Type + Reason string +} + +func (e *DecodeTypeError) Error() string { + if e.Reason != "" { + return "gorethink: could not decode type " + e.SrcType.String() + " into Go value of type " + e.DestType.String() + ": " + e.Reason + } else { + return "gorethink: could not decode type " + e.SrcType.String() + " into Go value of type " + e.DestType.String() + + } +} + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unsupported value type. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "gorethink: unsupported type: " + e.Type.String() +} + +// An UnsupportedTypeError is returned by Marshal when attempting +// to encode an unexpected value type. +type UnexpectedTypeError struct { + DestType, SrcType reflect.Type +} + +func (e *UnexpectedTypeError) Error() string { + return "gorethink: expected type: " + e.DestType.String() + ", got " + e.SrcType.String() +} + +type UnsupportedValueError struct { + Value reflect.Value + Str string +} + +func (e *UnsupportedValueError) Error() string { + return "gorethink: unsupported value: " + e.Str +} + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. +type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) 
+ default: + return append(errors, e.Error()) + } +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/fold.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/fold.go new file mode 100644 index 0000000..21c9e68 --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/fold.go @@ -0,0 +1,139 @@ +package encoding + +import ( + "bytes" + "unicode/utf8" +) + +const ( + caseMask = ^byte(0x20) // Mask to ignore case in ASCII. + kelvin = '\u212a' + smallLongEss = '\u017f' +) + +// foldFunc returns one of four different case folding equivalence +// functions, from most general (and slow) to fastest: +// +// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8 +// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S') +// 3) asciiEqualFold, no special, but includes non-letters (including _) +// 4) simpleLetterEqualFold, no specials, no non-letters. +// +// The letters S and K are special because they map to 3 runes, not just 2: +// * S maps to s and to U+017F 'ſ' Latin small letter long s +// * k maps to K and to U+212A 'K' Kelvin sign +// See http://play.golang.org/p/tTxjOc0OGo +// +// The returned function is specialized for matching against s and +// should only be given s. It's not curried for performance reasons. +func foldFunc(s []byte) func(s, t []byte) bool { + nonLetter := false + special := false // special letter + for _, b := range s { + if b >= utf8.RuneSelf { + return bytes.EqualFold + } + upper := b & caseMask + if upper < 'A' || upper > 'Z' { + nonLetter = true + } else if upper == 'K' || upper == 'S' { + // See above for why these letters are special. + special = true + } + } + if special { + return equalFoldRight + } + if nonLetter { + return asciiEqualFold + } + return simpleLetterEqualFold +} + +// equalFoldRight is a specialization of bytes.EqualFold when s is +// known to be all ASCII (including punctuation), but contains an 's', +// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t. 
+// See comments on foldFunc. +func equalFoldRight(s, t []byte) bool { + for _, sb := range s { + if len(t) == 0 { + return false + } + tb := t[0] + if tb < utf8.RuneSelf { + if sb != tb { + sbUpper := sb & caseMask + if 'A' <= sbUpper && sbUpper <= 'Z' { + if sbUpper != tb&caseMask { + return false + } + } else { + return false + } + } + t = t[1:] + continue + } + // sb is ASCII and t is not. t must be either kelvin + // sign or long s; sb must be s, S, k, or K. + tr, size := utf8.DecodeRune(t) + switch sb { + case 's', 'S': + if tr != smallLongEss { + return false + } + case 'k', 'K': + if tr != kelvin { + return false + } + default: + return false + } + t = t[size:] + + } + if len(t) > 0 { + return false + } + return true +} + +// asciiEqualFold is a specialization of bytes.EqualFold for use when +// s is all ASCII (but may contain non-letters) and contains no +// special-folding letters. +// See comments on foldFunc. +func asciiEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, sb := range s { + tb := t[i] + if sb == tb { + continue + } + if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') { + if sb&caseMask != tb&caseMask { + return false + } + } else { + return false + } + } + return true +} + +// simpleLetterEqualFold is a specialization of bytes.EqualFold for +// use when s is all ASCII letters (no underscores, etc) and also +// doesn't contain 'k', 'K', 's', or 'S'. +// See comments on foldFunc. 
+func simpleLetterEqualFold(s, t []byte) bool { + if len(s) != len(t) { + return false + } + for i, b := range s { + if b&caseMask != t[i]&caseMask { + return false + } + } + return true +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/tags.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/tags.go new file mode 100644 index 0000000..7999457 --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/tags.go @@ -0,0 +1,104 @@ +// This code is based on encoding/json and gorilla/schema + +package encoding + +import ( + "reflect" + "strconv" + "strings" + "unicode" +) + +var ( + Tags []string +) + +const ( + TagName = "gorethink" + JSONTagName = "json" + RefTagName = "gorethink_ref" +) + +// tagOptions is the string following a comma in a struct field's +// tag, or the empty string. It does not include the leading comma. +type tagOptions string + +func getTag(sf reflect.StructField) string { + if Tags == nil { + return sf.Tag.Get(TagName) + } + + for _, tagName := range Tags { + if tag := sf.Tag.Get(tagName); tag != "" { + return tag + } + } + + return "" +} + +func getRefTag(sf reflect.StructField) string { + return sf.Tag.Get(RefTagName) +} + +// parseTag splits a struct field's tag into its name and +// comma-separated options. 
+func parseTag(tag string) (string, tagOptions) { + if idx := strings.Index(tag, ","); idx != -1 { + return tag[:idx], tagOptions(tag[idx+1:]) + } + return tag, tagOptions("") +} + +func parseCompoundIndex(tag string) (string, int, bool) { + lIdx := strings.Index(tag, "[") + rIdx := strings.Index(tag, "]") + if lIdx > 1 && rIdx > lIdx+1 { + if elemIndex_, err := strconv.ParseInt(tag[lIdx+1:rIdx], 10, 64); err == nil { + return tag[:lIdx], int(elemIndex_), true + } + } + + return tag, 0, false +} + +func isValidTag(s string) bool { + if s == "" { + return false + } + for _, c := range s { + switch { + case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): + // Backslash and quote chars are reserved, but + // otherwise any punctuation chars are allowed + // in a tag name. + default: + if !unicode.IsLetter(c) && !unicode.IsDigit(c) { + return false + } + } + } + return true +} + +// Contains returns whether checks that a comma-separated list of options +// contains a particular substr flag. substr must be surrounded by a +// string boundary or commas. 
+func (o tagOptions) Contains(optionName string) bool { + if len(o) == 0 { + return false + } + s := string(o) + for s != "" { + var next string + i := strings.Index(s, ",") + if i >= 0 { + s, next = s[:i], s[i+1:] + } + if s == optionName { + return true + } + s = next + } + return false +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/encoding/utils.go b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/utils.go new file mode 100644 index 0000000..0ca2c77 --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/encoding/utils.go @@ -0,0 +1,72 @@ +package encoding + +import "reflect" + +func getTypeKind(t reflect.Type) reflect.Kind { + kind := t.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} + +func fieldByIndex(v reflect.Value, index []int) reflect.Value { + for _, i := range index { + if v.Kind() == reflect.Ptr { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + v = v.Field(i) + } + + return v +} + +func typeByIndex(t reflect.Type, index []int) reflect.Type { + for _, i := range index { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + t = t.Field(i).Type + } + return t +} + +// valueByString sorts reflect.Value by the string value, this 
is useful for +// sorting the result of MapKeys +type valueByString []reflect.Value + +func (x valueByString) Len() int { return len(x) } + +func (x valueByString) Swap(i, j int) { x[i], x[j] = x[j], x[i] } + +func (x valueByString) Less(i, j int) bool { + return x[i].String() < x[j].String() +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/ql2/generate.go b/vendor/gopkg.in/gorethink/gorethink.v2/ql2/generate.go new file mode 100644 index 0000000..0192eb0 --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/ql2/generate.go @@ -0,0 +1,3 @@ +//go:generate protoc --go_out=. ql2.proto + +package ql2 diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/ql2/ql2.pb.go b/vendor/gopkg.in/gorethink/gorethink.v2/ql2/ql2.pb.go new file mode 100644 index 0000000..759849f --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/ql2/ql2.pb.go @@ -0,0 +1,1681 @@ +// Code generated by protoc-gen-go. +// source: ql2.proto +// DO NOT EDIT! + +/* +Package ql2 is a generated protocol buffer package. + +It is generated from these files: + ql2.proto + +It has these top-level messages: + VersionDummy + Query + Frame + Backtrace + Response + Datum + Term +*/ +package ql2 + +import proto "github.com/golang/protobuf/proto" +import json "encoding/json" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = &json.SyntaxError{} +var _ = math.Inf + +// non-conforming protobuf libraries +// This enum contains the magic numbers for your version. See **THE HIGH-LEVEL +// VIEW** for what to do with it. 
+type VersionDummy_Version int32 + +const ( + VersionDummy_V0_1 VersionDummy_Version = 1063369270 + VersionDummy_V0_2 VersionDummy_Version = 1915781601 + VersionDummy_V0_3 VersionDummy_Version = 1601562686 + VersionDummy_V0_4 VersionDummy_Version = 1074539808 + VersionDummy_V1_0 VersionDummy_Version = 885177795 +) + +var VersionDummy_Version_name = map[int32]string{ + 1063369270: "V0_1", + 1915781601: "V0_2", + 1601562686: "V0_3", + 1074539808: "V0_4", + 885177795: "V1_0", +} +var VersionDummy_Version_value = map[string]int32{ + "V0_1": 1063369270, + "V0_2": 1915781601, + "V0_3": 1601562686, + "V0_4": 1074539808, + "V1_0": 885177795, +} + +func (x VersionDummy_Version) Enum() *VersionDummy_Version { + p := new(VersionDummy_Version) + *p = x + return p +} +func (x VersionDummy_Version) String() string { + return proto.EnumName(VersionDummy_Version_name, int32(x)) +} +func (x VersionDummy_Version) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *VersionDummy_Version) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(VersionDummy_Version_value, data, "VersionDummy_Version") + if err != nil { + return err + } + *x = VersionDummy_Version(value) + return nil +} + +// The protocol to use after the handshake, specified in V0_3 +type VersionDummy_Protocol int32 + +const ( + VersionDummy_PROTOBUF VersionDummy_Protocol = 656407617 + VersionDummy_JSON VersionDummy_Protocol = 2120839367 +) + +var VersionDummy_Protocol_name = map[int32]string{ + 656407617: "PROTOBUF", + 2120839367: "JSON", +} +var VersionDummy_Protocol_value = map[string]int32{ + "PROTOBUF": 656407617, + "JSON": 2120839367, +} + +func (x VersionDummy_Protocol) Enum() *VersionDummy_Protocol { + p := new(VersionDummy_Protocol) + *p = x + return p +} +func (x VersionDummy_Protocol) String() string { + return proto.EnumName(VersionDummy_Protocol_name, int32(x)) +} +func (x VersionDummy_Protocol) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) 
+} +func (x *VersionDummy_Protocol) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(VersionDummy_Protocol_value, data, "VersionDummy_Protocol") + if err != nil { + return err + } + *x = VersionDummy_Protocol(value) + return nil +} + +type Query_QueryType int32 + +const ( + Query_START Query_QueryType = 1 + Query_CONTINUE Query_QueryType = 2 + // (see [Response]). + Query_STOP Query_QueryType = 3 + Query_NOREPLY_WAIT Query_QueryType = 4 + Query_SERVER_INFO Query_QueryType = 5 +) + +var Query_QueryType_name = map[int32]string{ + 1: "START", + 2: "CONTINUE", + 3: "STOP", + 4: "NOREPLY_WAIT", + 5: "SERVER_INFO", +} +var Query_QueryType_value = map[string]int32{ + "START": 1, + "CONTINUE": 2, + "STOP": 3, + "NOREPLY_WAIT": 4, + "SERVER_INFO": 5, +} + +func (x Query_QueryType) Enum() *Query_QueryType { + p := new(Query_QueryType) + *p = x + return p +} +func (x Query_QueryType) String() string { + return proto.EnumName(Query_QueryType_name, int32(x)) +} +func (x Query_QueryType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Query_QueryType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Query_QueryType_value, data, "Query_QueryType") + if err != nil { + return err + } + *x = Query_QueryType(value) + return nil +} + +type Frame_FrameType int32 + +const ( + Frame_POS Frame_FrameType = 1 + Frame_OPT Frame_FrameType = 2 +) + +var Frame_FrameType_name = map[int32]string{ + 1: "POS", + 2: "OPT", +} +var Frame_FrameType_value = map[string]int32{ + "POS": 1, + "OPT": 2, +} + +func (x Frame_FrameType) Enum() *Frame_FrameType { + p := new(Frame_FrameType) + *p = x + return p +} +func (x Frame_FrameType) String() string { + return proto.EnumName(Frame_FrameType_name, int32(x)) +} +func (x Frame_FrameType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Frame_FrameType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Frame_FrameType_value, 
data, "Frame_FrameType") + if err != nil { + return err + } + *x = Frame_FrameType(value) + return nil +} + +type Response_ResponseType int32 + +const ( + // These response types indicate success. + Response_SUCCESS_ATOM Response_ResponseType = 1 + Response_SUCCESS_SEQUENCE Response_ResponseType = 2 + Response_SUCCESS_PARTIAL Response_ResponseType = 3 + // datatypes. If you send a [CONTINUE] query with + // the same token as this response, you will get + // more of the sequence. Keep sending [CONTINUE] + // queries until you get back [SUCCESS_SEQUENCE]. + Response_WAIT_COMPLETE Response_ResponseType = 4 + Response_SERVER_INFO Response_ResponseType = 5 + // These response types indicate failure. + Response_CLIENT_ERROR Response_ResponseType = 16 + // client sends a malformed protobuf, or tries to + // send [CONTINUE] for an unknown token. + Response_COMPILE_ERROR Response_ResponseType = 17 + // checking. For example, if you pass too many + // arguments to a function. + Response_RUNTIME_ERROR Response_ResponseType = 18 +) + +var Response_ResponseType_name = map[int32]string{ + 1: "SUCCESS_ATOM", + 2: "SUCCESS_SEQUENCE", + 3: "SUCCESS_PARTIAL", + 4: "WAIT_COMPLETE", + 5: "SERVER_INFO", + 16: "CLIENT_ERROR", + 17: "COMPILE_ERROR", + 18: "RUNTIME_ERROR", +} +var Response_ResponseType_value = map[string]int32{ + "SUCCESS_ATOM": 1, + "SUCCESS_SEQUENCE": 2, + "SUCCESS_PARTIAL": 3, + "WAIT_COMPLETE": 4, + "SERVER_INFO": 5, + "CLIENT_ERROR": 16, + "COMPILE_ERROR": 17, + "RUNTIME_ERROR": 18, +} + +func (x Response_ResponseType) Enum() *Response_ResponseType { + p := new(Response_ResponseType) + *p = x + return p +} +func (x Response_ResponseType) String() string { + return proto.EnumName(Response_ResponseType_name, int32(x)) +} +func (x Response_ResponseType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Response_ResponseType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Response_ResponseType_value, data, 
"Response_ResponseType") + if err != nil { + return err + } + *x = Response_ResponseType(value) + return nil +} + +// If `ResponseType` is `RUNTIME_ERROR`, this may be filled in with more +// information about the error. +type Response_ErrorType int32 + +const ( + Response_INTERNAL Response_ErrorType = 1000000 + Response_RESOURCE_LIMIT Response_ErrorType = 2000000 + Response_QUERY_LOGIC Response_ErrorType = 3000000 + Response_NON_EXISTENCE Response_ErrorType = 3100000 + Response_OP_FAILED Response_ErrorType = 4100000 + Response_OP_INDETERMINATE Response_ErrorType = 4200000 + Response_USER Response_ErrorType = 5000000 + Response_PERMISSION_ERROR Response_ErrorType = 6000000 +) + +var Response_ErrorType_name = map[int32]string{ + 1000000: "INTERNAL", + 2000000: "RESOURCE_LIMIT", + 3000000: "QUERY_LOGIC", + 3100000: "NON_EXISTENCE", + 4100000: "OP_FAILED", + 4200000: "OP_INDETERMINATE", + 5000000: "USER", + 6000000: "PERMISSION_ERROR", +} +var Response_ErrorType_value = map[string]int32{ + "INTERNAL": 1000000, + "RESOURCE_LIMIT": 2000000, + "QUERY_LOGIC": 3000000, + "NON_EXISTENCE": 3100000, + "OP_FAILED": 4100000, + "OP_INDETERMINATE": 4200000, + "USER": 5000000, + "PERMISSION_ERROR": 6000000, +} + +func (x Response_ErrorType) Enum() *Response_ErrorType { + p := new(Response_ErrorType) + *p = x + return p +} +func (x Response_ErrorType) String() string { + return proto.EnumName(Response_ErrorType_name, int32(x)) +} +func (x Response_ErrorType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Response_ErrorType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Response_ErrorType_value, data, "Response_ErrorType") + if err != nil { + return err + } + *x = Response_ErrorType(value) + return nil +} + +// ResponseNotes are used to provide information about the query +// response that may be useful for people writing drivers or ORMs. 
+// Currently all the notes we send indicate that a stream has certain +// special properties. +type Response_ResponseNote int32 + +const ( + // The stream is a changefeed stream (e.g. `r.table('test').changes()`). + Response_SEQUENCE_FEED Response_ResponseNote = 1 + // The stream is a point changefeed stream + // (e.g. `r.table('test').get(0).changes()`). + Response_ATOM_FEED Response_ResponseNote = 2 + // The stream is an order_by_limit changefeed stream + // (e.g. `r.table('test').order_by(index: 'id').limit(5).changes()`). + Response_ORDER_BY_LIMIT_FEED Response_ResponseNote = 3 + // The stream is a union of multiple changefeed types that can't be + // collapsed to a single type + // (e.g. `r.table('test').changes().union(r.table('test').get(0).changes())`). + Response_UNIONED_FEED Response_ResponseNote = 4 + // The stream is a changefeed stream and includes notes on what state + // the changefeed stream is in (e.g. objects of the form `{state: + // 'initializing'}`). + Response_INCLUDES_STATES Response_ResponseNote = 5 +) + +var Response_ResponseNote_name = map[int32]string{ + 1: "SEQUENCE_FEED", + 2: "ATOM_FEED", + 3: "ORDER_BY_LIMIT_FEED", + 4: "UNIONED_FEED", + 5: "INCLUDES_STATES", +} +var Response_ResponseNote_value = map[string]int32{ + "SEQUENCE_FEED": 1, + "ATOM_FEED": 2, + "ORDER_BY_LIMIT_FEED": 3, + "UNIONED_FEED": 4, + "INCLUDES_STATES": 5, +} + +func (x Response_ResponseNote) Enum() *Response_ResponseNote { + p := new(Response_ResponseNote) + *p = x + return p +} +func (x Response_ResponseNote) String() string { + return proto.EnumName(Response_ResponseNote_name, int32(x)) +} +func (x Response_ResponseNote) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Response_ResponseNote) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Response_ResponseNote_value, data, "Response_ResponseNote") + if err != nil { + return err + } + *x = Response_ResponseNote(value) + return nil +} + +type Datum_DatumType 
int32 + +const ( + Datum_R_NULL Datum_DatumType = 1 + Datum_R_BOOL Datum_DatumType = 2 + Datum_R_NUM Datum_DatumType = 3 + Datum_R_STR Datum_DatumType = 4 + Datum_R_ARRAY Datum_DatumType = 5 + Datum_R_OBJECT Datum_DatumType = 6 + // This [DatumType] will only be used if [accepts_r_json] is + // set to [true] in [Query]. [r_str] will be filled with a + // JSON encoding of the [Datum]. + Datum_R_JSON Datum_DatumType = 7 +) + +var Datum_DatumType_name = map[int32]string{ + 1: "R_NULL", + 2: "R_BOOL", + 3: "R_NUM", + 4: "R_STR", + 5: "R_ARRAY", + 6: "R_OBJECT", + 7: "R_JSON", +} +var Datum_DatumType_value = map[string]int32{ + "R_NULL": 1, + "R_BOOL": 2, + "R_NUM": 3, + "R_STR": 4, + "R_ARRAY": 5, + "R_OBJECT": 6, + "R_JSON": 7, +} + +func (x Datum_DatumType) Enum() *Datum_DatumType { + p := new(Datum_DatumType) + *p = x + return p +} +func (x Datum_DatumType) String() string { + return proto.EnumName(Datum_DatumType_name, int32(x)) +} +func (x Datum_DatumType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Datum_DatumType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(Datum_DatumType_value, data, "Datum_DatumType") + if err != nil { + return err + } + *x = Datum_DatumType(value) + return nil +} + +type Term_TermType int32 + +const ( + // A RQL datum, stored in `datum` below. + Term_DATUM Term_TermType = 1 + Term_MAKE_ARRAY Term_TermType = 2 + // Evaluate the terms in [optargs] and make an object + Term_MAKE_OBJ Term_TermType = 3 + // Takes an integer representing a variable and returns the value stored + // in that variable. It's the responsibility of the client to translate + // from their local representation of a variable to a unique _non-negative_ + // integer for that variable. (We do it this way instead of letting + // clients provide variable names as strings to discourage + // variable-capturing client libraries, and because it's more efficient + // on the wire.) 
+ Term_VAR Term_TermType = 10 + // Takes some javascript code and executes it. + Term_JAVASCRIPT Term_TermType = 11 + // STRING {timeout: !NUMBER} -> Function(*) + Term_UUID Term_TermType = 169 + // Takes an HTTP URL and gets it. If the get succeeds and + // returns valid JSON, it is converted into a DATUM + Term_HTTP Term_TermType = 153 + // Takes a string and throws an error with that message. + // Inside of a `default` block, you can omit the first + // argument to rethrow whatever error you catch (this is most + // useful as an argument to the `default` filter optarg). + Term_ERROR Term_TermType = 12 + // Takes nothing and returns a reference to the implicit variable. + Term_IMPLICIT_VAR Term_TermType = 13 + // * Data Operators + // Returns a reference to a database. + Term_DB Term_TermType = 14 + // Returns a reference to a table. + Term_TABLE Term_TermType = 15 + // STRING, {read_mode:STRING, identifier_format:STRING} -> Table + // Gets a single element from a table by its primary or a secondary key. + Term_GET Term_TermType = 16 + // Table, STRING -> NULL | Table, NUMBER -> NULL | + Term_GET_ALL Term_TermType = 78 + // Simple DATUM Ops + Term_EQ Term_TermType = 17 + Term_NE Term_TermType = 18 + Term_LT Term_TermType = 19 + Term_LE Term_TermType = 20 + Term_GT Term_TermType = 21 + Term_GE Term_TermType = 22 + Term_NOT Term_TermType = 23 + // ADD can either add two numbers or concatenate two arrays. + Term_ADD Term_TermType = 24 + Term_SUB Term_TermType = 25 + Term_MUL Term_TermType = 26 + Term_DIV Term_TermType = 27 + Term_MOD Term_TermType = 28 + Term_FLOOR Term_TermType = 183 + Term_CEIL Term_TermType = 184 + Term_ROUND Term_TermType = 185 + // DATUM Array Ops + // Append a single element to the end of an array (like `snoc`). + Term_APPEND Term_TermType = 29 + // Prepend a single element to the end of an array (like `cons`). + Term_PREPEND Term_TermType = 80 + // Remove the elements of one array from another array. 
+ Term_DIFFERENCE Term_TermType = 95 + // DATUM Set Ops + // Set ops work on arrays. They don't use actual sets and thus have + // performance characteristics you would expect from arrays rather than + // from sets. All set operations have the post condition that they + // array they return contains no duplicate values. + Term_SET_INSERT Term_TermType = 88 + Term_SET_INTERSECTION Term_TermType = 89 + Term_SET_UNION Term_TermType = 90 + Term_SET_DIFFERENCE Term_TermType = 91 + Term_SLICE Term_TermType = 30 + Term_SKIP Term_TermType = 70 + Term_LIMIT Term_TermType = 71 + Term_OFFSETS_OF Term_TermType = 87 + Term_CONTAINS Term_TermType = 93 + // Stream/Object Ops + // Get a particular field from an object, or map that over a + // sequence. + Term_GET_FIELD Term_TermType = 31 + // | Sequence, STRING -> Sequence + // Return an array containing the keys of the object. + Term_KEYS Term_TermType = 94 + // Return an array containing the values of the object. + Term_VALUES Term_TermType = 186 + // Creates an object + Term_OBJECT Term_TermType = 143 + // Check whether an object contains all the specified fields, + // or filters a sequence so that all objects inside of it + // contain all the specified fields. + Term_HAS_FIELDS Term_TermType = 32 + // x.with_fields(...) <=> x.has_fields(...).pluck(...) + Term_WITH_FIELDS Term_TermType = 96 + // Get a subset of an object by selecting some attributes to preserve, + // or map that over a sequence. (Both pick and pluck, polymorphic.) + Term_PLUCK Term_TermType = 33 + // Get a subset of an object by selecting some attributes to discard, or + // map that over a sequence. (Both unpick and without, polymorphic.) + Term_WITHOUT Term_TermType = 34 + // Merge objects (right-preferential) + Term_MERGE Term_TermType = 35 + // Sequence Ops + // Get all elements of a sequence between two values. + // Half-open by default, but the openness of either side can be + // changed by passing 'closed' or 'open for `right_bound` or + // `left_bound`. 
+ Term_BETWEEN_DEPRECATED Term_TermType = 36 + // With the newer version, clients should use `r.minval` and `r.maxval` for unboundedness + Term_BETWEEN Term_TermType = 182 + Term_REDUCE Term_TermType = 37 + Term_MAP Term_TermType = 38 + Term_FOLD Term_TermType = 187 + // Filter a sequence with either a function or a shortcut + // object (see API docs for details). The body of FILTER is + // wrapped in an implicit `.default(false)`, and you can + // change the default value by specifying the `default` + // optarg. If you make the default `r.error`, all errors + // caught by `default` will be rethrown as if the `default` + // did not exist. + Term_FILTER Term_TermType = 39 + // Sequence, OBJECT, {default:DATUM} -> Sequence + // Map a function over a sequence and then concatenate the results together. + Term_CONCAT_MAP Term_TermType = 40 + // Order a sequence based on one or more attributes. + Term_ORDER_BY Term_TermType = 41 + // Get all distinct elements of a sequence (like `uniq`). + Term_DISTINCT Term_TermType = 42 + // Count the number of elements in a sequence, or only the elements that match + // a given filter. + Term_COUNT Term_TermType = 43 + Term_IS_EMPTY Term_TermType = 86 + // Take the union of multiple sequences (preserves duplicate elements! (use distinct)). + Term_UNION Term_TermType = 44 + // Get the Nth element of a sequence. + Term_NTH Term_TermType = 45 + // do NTH or GET_FIELD depending on target object + Term_BRACKET Term_TermType = 170 + Term_INNER_JOIN Term_TermType = 48 + Term_OUTER_JOIN Term_TermType = 49 + // An inner-join that does an equality comparison on two attributes. + Term_EQ_JOIN Term_TermType = 50 + Term_ZIP Term_TermType = 72 + Term_RANGE Term_TermType = 173 + // Array Ops + // Insert an element in to an array at a given index. + Term_INSERT_AT Term_TermType = 82 + // Remove an element at a given index from an array. 
+ Term_DELETE_AT Term_TermType = 83 + // ARRAY, NUMBER, NUMBER -> ARRAY + // Change the element at a given index of an array. + Term_CHANGE_AT Term_TermType = 84 + // Splice one array in to another array. + Term_SPLICE_AT Term_TermType = 85 + // * Type Ops + // Coerces a datum to a named type (e.g. "bool"). + // If you previously used `stream_to_array`, you should use this instead + // with the type "array". + Term_COERCE_TO Term_TermType = 51 + // Returns the named type of a datum (e.g. TYPE_OF(true) = "BOOL") + Term_TYPE_OF Term_TermType = 52 + // * Write Ops (the OBJECTs contain data about number of errors etc.) + // Updates all the rows in a selection. Calls its Function with the row + // to be updated, and then merges the result of that call. + Term_UPDATE Term_TermType = 53 + // SingleSelection, Function(1), {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | + // StreamSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT | + // SingleSelection, OBJECT, {non_atomic:BOOL, durability:STRING, return_changes:BOOL} -> OBJECT + // Deletes all the rows in a selection. + Term_DELETE Term_TermType = 54 + // Replaces all the rows in a selection. Calls its Function with the row + // to be replaced, and then discards it and stores the result of that + // call. + Term_REPLACE Term_TermType = 55 + // Inserts into a table. If `conflict` is replace, overwrites + // entries with the same primary key. If `conflict` is + // update, does an update on the entry. If `conflict` is + // error, or is omitted, conflicts will trigger an error. + Term_INSERT Term_TermType = 56 + // * Administrative OPs + // Creates a database with a particular name. + Term_DB_CREATE Term_TermType = 57 + // Drops a database with a particular name. + Term_DB_DROP Term_TermType = 58 + // Lists all the databases by name. (Takes no arguments) + Term_DB_LIST Term_TermType = 59 + // Creates a table with a particular name in a particular + // database. 
(You may omit the first argument to use the + // default database.) + Term_TABLE_CREATE Term_TermType = 60 + // Database, STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT + // STRING, {primary_key:STRING, shards:NUMBER, replicas:NUMBER, primary_replica_tag:STRING} -> OBJECT + // STRING, {primary_key:STRING, shards:NUMBER, replicas:OBJECT, primary_replica_tag:STRING} -> OBJECT + // Drops a table with a particular name from a particular + // database. (You may omit the first argument to use the + // default database.) + Term_TABLE_DROP Term_TermType = 61 + // STRING -> OBJECT + // Lists all the tables in a particular database. (You may + // omit the first argument to use the default database.) + Term_TABLE_LIST Term_TermType = 62 + // -> ARRAY + // Returns the row in the `rethinkdb.table_config` or `rethinkdb.db_config` table + // that corresponds to the given database or table. + Term_CONFIG Term_TermType = 174 + // Table -> SingleSelection + // Returns the row in the `rethinkdb.table_status` table that corresponds to the + // given table. + Term_STATUS Term_TermType = 175 + // Called on a table, waits for that table to be ready for read/write operations. + // Called on a database, waits for all of the tables in the database to be ready. + // Returns the corresponding row or rows from the `rethinkdb.table_status` table. + Term_WAIT Term_TermType = 177 + // Database -> OBJECT + // Generates a new config for the given table, or all tables in the given database + // The `shards` and `replicas` arguments are required. If `emergency_repair` is + // specified, it will enter a completely different mode of repairing a table + // which has lost half or more of its replicas. 
+ Term_RECONFIGURE Term_TermType = 176 + // dry_run:BOOLEAN] + // } -> OBJECT + // Database|Table, {shards:NUMBER, replicas:OBJECT [, + // primary_replica_tag:STRING, + // nonvoting_replica_tags:ARRAY, + // dry_run:BOOLEAN] + // } -> OBJECT + // Table, {emergency_repair:STRING, dry_run:BOOLEAN} -> OBJECT + // Balances the table's shards but leaves everything else the same. Can also be + // applied to an entire database at once. + Term_REBALANCE Term_TermType = 179 + // Ensures that previously issued soft-durability writes are complete and + // written to disk. + Term_SYNC Term_TermType = 138 + // Set global, database, or table-specific permissions + Term_GRANT Term_TermType = 188 + // * Secondary indexes OPs + // Creates a new secondary index with a particular name and definition. + Term_INDEX_CREATE Term_TermType = 75 + // Drops a secondary index with a particular name from the specified table. + Term_INDEX_DROP Term_TermType = 76 + // Lists all secondary indexes on a particular table. + Term_INDEX_LIST Term_TermType = 77 + // Gets information about whether or not a set of indexes are ready to + // be accessed. Returns a list of objects that look like this: + // {index:STRING, ready:BOOL[, progress:NUMBER]} + Term_INDEX_STATUS Term_TermType = 139 + // Blocks until a set of indexes are ready to be accessed. Returns the + // same values INDEX_STATUS. + Term_INDEX_WAIT Term_TermType = 140 + // Renames the given index to a new name + Term_INDEX_RENAME Term_TermType = 156 + // * Control Operators + // Calls a function on data + Term_FUNCALL Term_TermType = 64 + // Executes its first argument, and returns its second argument if it + // got [true] or its third argument if it got [false] (like an `if` + // statement). + Term_BRANCH Term_TermType = 65 + // Returns true if any of its arguments returns true (short-circuits). + Term_OR Term_TermType = 66 + // Returns true if all of its arguments return true (short-circuits). 
+ Term_AND Term_TermType = 67 + // Calls its Function with each entry in the sequence + // and executes the array of terms that Function returns. + Term_FOR_EACH Term_TermType = 68 + // An anonymous function. Takes an array of numbers representing + // variables (see [VAR] above), and a [Term] to execute with those in + // scope. Returns a function that may be passed an array of arguments, + // then executes the Term with those bound to the variable names. The + // user will never construct this directly. We use it internally for + // things like `map` which take a function. The "arity" of a [Function] is + // the number of arguments it takes. + // For example, here's what `_X_.map{|x| x+2}` turns into: + // Term { + // type = MAP; + // args = [_X_, + // Term { + // type = Function; + // args = [Term { + // type = DATUM; + // datum = Datum { + // type = R_ARRAY; + // r_array = [Datum { type = R_NUM; r_num = 1; }]; + // }; + // }, + // Term { + // type = ADD; + // args = [Term { + // type = VAR; + // args = [Term { + // type = DATUM; + // datum = Datum { type = R_NUM; + // r_num = 1}; + // }]; + // }, + // Term { + // type = DATUM; + // datum = Datum { type = R_NUM; r_num = 2; }; + // }]; + // }]; + // }]; + Term_FUNC Term_TermType = 69 + // Indicates to ORDER_BY that this attribute is to be sorted in ascending order. + Term_ASC Term_TermType = 73 + // Indicates to ORDER_BY that this attribute is to be sorted in descending order. + Term_DESC Term_TermType = 74 + // Gets info about anything. INFO is most commonly called on tables. + Term_INFO Term_TermType = 79 + // `a.match(b)` returns a match object if the string `a` + // matches the regular expression `b`. + Term_MATCH Term_TermType = 97 + // Change the case of a string. + Term_UPCASE Term_TermType = 141 + Term_DOWNCASE Term_TermType = 142 + // Select a number of elements from sequence with uniform distribution. + Term_SAMPLE Term_TermType = 81 + // Evaluates its first argument. 
If that argument returns + // NULL or throws an error related to the absence of an + // expected value (for instance, accessing a non-existent + // field or adding NULL to an integer), DEFAULT will either + // return its second argument or execute it if it's a + // function. If the second argument is a function, it will be + // passed either the text of the error or NULL as its + // argument. + Term_DEFAULT Term_TermType = 92 + // Parses its first argument as a json string and returns it as a + // datum. + Term_JSON Term_TermType = 98 + // Returns the datum as a JSON string. + // N.B.: we would really prefer this be named TO_JSON and that exists as + // an alias in Python and JavaScript drivers; however it conflicts with the + // standard `to_json` method defined by Ruby's standard json library. + Term_TO_JSON_STRING Term_TermType = 172 + // Parses its first arguments as an ISO 8601 time and returns it as a + // datum. + Term_ISO8601 Term_TermType = 99 + // Prints a time as an ISO 8601 time. + Term_TO_ISO8601 Term_TermType = 100 + // Returns a time given seconds since epoch in UTC. + Term_EPOCH_TIME Term_TermType = 101 + // Returns seconds since epoch in UTC given a time. + Term_TO_EPOCH_TIME Term_TermType = 102 + // The time the query was received by the server. + Term_NOW Term_TermType = 103 + // Puts a time into an ISO 8601 timezone. + Term_IN_TIMEZONE Term_TermType = 104 + // a.during(b, c) returns whether a is in the range [b, c) + Term_DURING Term_TermType = 105 + // Retrieves the date portion of a time. + Term_DATE Term_TermType = 106 + // x.time_of_day == x.date - x + Term_TIME_OF_DAY Term_TermType = 126 + // Returns the timezone of a time. + Term_TIMEZONE Term_TermType = 127 + // These access the various components of a time. 
+ Term_YEAR Term_TermType = 128 + Term_MONTH Term_TermType = 129 + Term_DAY Term_TermType = 130 + Term_DAY_OF_WEEK Term_TermType = 131 + Term_DAY_OF_YEAR Term_TermType = 132 + Term_HOURS Term_TermType = 133 + Term_MINUTES Term_TermType = 134 + Term_SECONDS Term_TermType = 135 + // Construct a time from a date and optional timezone or a + // date+time and optional timezone. + Term_TIME Term_TermType = 136 + // Constants for ISO 8601 days of the week. + Term_MONDAY Term_TermType = 107 + Term_TUESDAY Term_TermType = 108 + Term_WEDNESDAY Term_TermType = 109 + Term_THURSDAY Term_TermType = 110 + Term_FRIDAY Term_TermType = 111 + Term_SATURDAY Term_TermType = 112 + Term_SUNDAY Term_TermType = 113 + // Constants for ISO 8601 months. + Term_JANUARY Term_TermType = 114 + Term_FEBRUARY Term_TermType = 115 + Term_MARCH Term_TermType = 116 + Term_APRIL Term_TermType = 117 + Term_MAY Term_TermType = 118 + Term_JUNE Term_TermType = 119 + Term_JULY Term_TermType = 120 + Term_AUGUST Term_TermType = 121 + Term_SEPTEMBER Term_TermType = 122 + Term_OCTOBER Term_TermType = 123 + Term_NOVEMBER Term_TermType = 124 + Term_DECEMBER Term_TermType = 125 + // Indicates to MERGE to replace, or remove in case of an empty literal, the + // other object rather than merge it. 
+ Term_LITERAL Term_TermType = 137 + // SEQUENCE, STRING -> GROUPED_SEQUENCE | SEQUENCE, FUNCTION -> GROUPED_SEQUENCE + Term_GROUP Term_TermType = 144 + Term_SUM Term_TermType = 145 + Term_AVG Term_TermType = 146 + Term_MIN Term_TermType = 147 + Term_MAX Term_TermType = 148 + // `str.split()` splits on whitespace + // `str.split(" ")` splits on spaces only + // `str.split(" ", 5)` splits on spaces with at most 5 results + // `str.split(nil, 5)` splits on whitespace with at most 5 results + Term_SPLIT Term_TermType = 149 + Term_UNGROUP Term_TermType = 150 + // Takes a range of numbers and returns a random number within the range + Term_RANDOM Term_TermType = 151 + Term_CHANGES Term_TermType = 152 + Term_ARGS Term_TermType = 154 + // BINARY is client-only at the moment, it is not supported on the server + Term_BINARY Term_TermType = 155 + Term_GEOJSON Term_TermType = 157 + Term_TO_GEOJSON Term_TermType = 158 + Term_POINT Term_TermType = 159 + Term_LINE Term_TermType = 160 + Term_POLYGON Term_TermType = 161 + Term_DISTANCE Term_TermType = 162 + Term_INTERSECTS Term_TermType = 163 + Term_INCLUDES Term_TermType = 164 + Term_CIRCLE Term_TermType = 165 + Term_GET_INTERSECTING Term_TermType = 166 + Term_FILL Term_TermType = 167 + Term_GET_NEAREST Term_TermType = 168 + Term_POLYGON_SUB Term_TermType = 171 + // Constants for specifying key ranges + Term_MINVAL Term_TermType = 180 + Term_MAXVAL Term_TermType = 181 +) + +var Term_TermType_name = map[int32]string{ + 1: "DATUM", + 2: "MAKE_ARRAY", + 3: "MAKE_OBJ", + 10: "VAR", + 11: "JAVASCRIPT", + 169: "UUID", + 153: "HTTP", + 12: "ERROR", + 13: "IMPLICIT_VAR", + 14: "DB", + 15: "TABLE", + 16: "GET", + 78: "GET_ALL", + 17: "EQ", + 18: "NE", + 19: "LT", + 20: "LE", + 21: "GT", + 22: "GE", + 23: "NOT", + 24: "ADD", + 25: "SUB", + 26: "MUL", + 27: "DIV", + 28: "MOD", + 183: "FLOOR", + 184: "CEIL", + 185: "ROUND", + 29: "APPEND", + 80: "PREPEND", + 95: "DIFFERENCE", + 88: "SET_INSERT", + 89: "SET_INTERSECTION", + 90: "SET_UNION", + 
91: "SET_DIFFERENCE", + 30: "SLICE", + 70: "SKIP", + 71: "LIMIT", + 87: "OFFSETS_OF", + 93: "CONTAINS", + 31: "GET_FIELD", + 94: "KEYS", + 186: "VALUES", + 143: "OBJECT", + 32: "HAS_FIELDS", + 96: "WITH_FIELDS", + 33: "PLUCK", + 34: "WITHOUT", + 35: "MERGE", + 36: "BETWEEN_DEPRECATED", + 182: "BETWEEN", + 37: "REDUCE", + 38: "MAP", + 187: "FOLD", + 39: "FILTER", + 40: "CONCAT_MAP", + 41: "ORDER_BY", + 42: "DISTINCT", + 43: "COUNT", + 86: "IS_EMPTY", + 44: "UNION", + 45: "NTH", + 170: "BRACKET", + 48: "INNER_JOIN", + 49: "OUTER_JOIN", + 50: "EQ_JOIN", + 72: "ZIP", + 173: "RANGE", + 82: "INSERT_AT", + 83: "DELETE_AT", + 84: "CHANGE_AT", + 85: "SPLICE_AT", + 51: "COERCE_TO", + 52: "TYPE_OF", + 53: "UPDATE", + 54: "DELETE", + 55: "REPLACE", + 56: "INSERT", + 57: "DB_CREATE", + 58: "DB_DROP", + 59: "DB_LIST", + 60: "TABLE_CREATE", + 61: "TABLE_DROP", + 62: "TABLE_LIST", + 174: "CONFIG", + 175: "STATUS", + 177: "WAIT", + 176: "RECONFIGURE", + 179: "REBALANCE", + 138: "SYNC", + 188: "GRANT", + 75: "INDEX_CREATE", + 76: "INDEX_DROP", + 77: "INDEX_LIST", + 139: "INDEX_STATUS", + 140: "INDEX_WAIT", + 156: "INDEX_RENAME", + 64: "FUNCALL", + 65: "BRANCH", + 66: "OR", + 67: "AND", + 68: "FOR_EACH", + 69: "FUNC", + 73: "ASC", + 74: "DESC", + 79: "INFO", + 97: "MATCH", + 141: "UPCASE", + 142: "DOWNCASE", + 81: "SAMPLE", + 92: "DEFAULT", + 98: "JSON", + 172: "TO_JSON_STRING", + 99: "ISO8601", + 100: "TO_ISO8601", + 101: "EPOCH_TIME", + 102: "TO_EPOCH_TIME", + 103: "NOW", + 104: "IN_TIMEZONE", + 105: "DURING", + 106: "DATE", + 126: "TIME_OF_DAY", + 127: "TIMEZONE", + 128: "YEAR", + 129: "MONTH", + 130: "DAY", + 131: "DAY_OF_WEEK", + 132: "DAY_OF_YEAR", + 133: "HOURS", + 134: "MINUTES", + 135: "SECONDS", + 136: "TIME", + 107: "MONDAY", + 108: "TUESDAY", + 109: "WEDNESDAY", + 110: "THURSDAY", + 111: "FRIDAY", + 112: "SATURDAY", + 113: "SUNDAY", + 114: "JANUARY", + 115: "FEBRUARY", + 116: "MARCH", + 117: "APRIL", + 118: "MAY", + 119: "JUNE", + 120: "JULY", + 121: "AUGUST", + 122: 
"SEPTEMBER", + 123: "OCTOBER", + 124: "NOVEMBER", + 125: "DECEMBER", + 137: "LITERAL", + 144: "GROUP", + 145: "SUM", + 146: "AVG", + 147: "MIN", + 148: "MAX", + 149: "SPLIT", + 150: "UNGROUP", + 151: "RANDOM", + 152: "CHANGES", + 154: "ARGS", + 155: "BINARY", + 157: "GEOJSON", + 158: "TO_GEOJSON", + 159: "POINT", + 160: "LINE", + 161: "POLYGON", + 162: "DISTANCE", + 163: "INTERSECTS", + 164: "INCLUDES", + 165: "CIRCLE", + 166: "GET_INTERSECTING", + 167: "FILL", + 168: "GET_NEAREST", + 171: "POLYGON_SUB", + 180: "MINVAL", + 181: "MAXVAL", +} +var Term_TermType_value = map[string]int32{ + "DATUM": 1, + "MAKE_ARRAY": 2, + "MAKE_OBJ": 3, + "VAR": 10, + "JAVASCRIPT": 11, + "UUID": 169, + "HTTP": 153, + "ERROR": 12, + "IMPLICIT_VAR": 13, + "DB": 14, + "TABLE": 15, + "GET": 16, + "GET_ALL": 78, + "EQ": 17, + "NE": 18, + "LT": 19, + "LE": 20, + "GT": 21, + "GE": 22, + "NOT": 23, + "ADD": 24, + "SUB": 25, + "MUL": 26, + "DIV": 27, + "MOD": 28, + "FLOOR": 183, + "CEIL": 184, + "ROUND": 185, + "APPEND": 29, + "PREPEND": 80, + "DIFFERENCE": 95, + "SET_INSERT": 88, + "SET_INTERSECTION": 89, + "SET_UNION": 90, + "SET_DIFFERENCE": 91, + "SLICE": 30, + "SKIP": 70, + "LIMIT": 71, + "OFFSETS_OF": 87, + "CONTAINS": 93, + "GET_FIELD": 31, + "KEYS": 94, + "VALUES": 186, + "OBJECT": 143, + "HAS_FIELDS": 32, + "WITH_FIELDS": 96, + "PLUCK": 33, + "WITHOUT": 34, + "MERGE": 35, + "BETWEEN_DEPRECATED": 36, + "BETWEEN": 182, + "REDUCE": 37, + "MAP": 38, + "FOLD": 187, + "FILTER": 39, + "CONCAT_MAP": 40, + "ORDER_BY": 41, + "DISTINCT": 42, + "COUNT": 43, + "IS_EMPTY": 86, + "UNION": 44, + "NTH": 45, + "BRACKET": 170, + "INNER_JOIN": 48, + "OUTER_JOIN": 49, + "EQ_JOIN": 50, + "ZIP": 72, + "RANGE": 173, + "INSERT_AT": 82, + "DELETE_AT": 83, + "CHANGE_AT": 84, + "SPLICE_AT": 85, + "COERCE_TO": 51, + "TYPE_OF": 52, + "UPDATE": 53, + "DELETE": 54, + "REPLACE": 55, + "INSERT": 56, + "DB_CREATE": 57, + "DB_DROP": 58, + "DB_LIST": 59, + "TABLE_CREATE": 60, + "TABLE_DROP": 61, + "TABLE_LIST": 62, + 
"CONFIG": 174, + "STATUS": 175, + "WAIT": 177, + "RECONFIGURE": 176, + "REBALANCE": 179, + "SYNC": 138, + "GRANT": 188, + "INDEX_CREATE": 75, + "INDEX_DROP": 76, + "INDEX_LIST": 77, + "INDEX_STATUS": 139, + "INDEX_WAIT": 140, + "INDEX_RENAME": 156, + "FUNCALL": 64, + "BRANCH": 65, + "OR": 66, + "AND": 67, + "FOR_EACH": 68, + "FUNC": 69, + "ASC": 73, + "DESC": 74, + "INFO": 79, + "MATCH": 97, + "UPCASE": 141, + "DOWNCASE": 142, + "SAMPLE": 81, + "DEFAULT": 92, + "JSON": 98, + "TO_JSON_STRING": 172, + "ISO8601": 99, + "TO_ISO8601": 100, + "EPOCH_TIME": 101, + "TO_EPOCH_TIME": 102, + "NOW": 103, + "IN_TIMEZONE": 104, + "DURING": 105, + "DATE": 106, + "TIME_OF_DAY": 126, + "TIMEZONE": 127, + "YEAR": 128, + "MONTH": 129, + "DAY": 130, + "DAY_OF_WEEK": 131, + "DAY_OF_YEAR": 132, + "HOURS": 133, + "MINUTES": 134, + "SECONDS": 135, + "TIME": 136, + "MONDAY": 107, + "TUESDAY": 108, + "WEDNESDAY": 109, + "THURSDAY": 110, + "FRIDAY": 111, + "SATURDAY": 112, + "SUNDAY": 113, + "JANUARY": 114, + "FEBRUARY": 115, + "MARCH": 116, + "APRIL": 117, + "MAY": 118, + "JUNE": 119, + "JULY": 120, + "AUGUST": 121, + "SEPTEMBER": 122, + "OCTOBER": 123, + "NOVEMBER": 124, + "DECEMBER": 125, + "LITERAL": 137, + "GROUP": 144, + "SUM": 145, + "AVG": 146, + "MIN": 147, + "MAX": 148, + "SPLIT": 149, + "UNGROUP": 150, + "RANDOM": 151, + "CHANGES": 152, + "ARGS": 154, + "BINARY": 155, + "GEOJSON": 157, + "TO_GEOJSON": 158, + "POINT": 159, + "LINE": 160, + "POLYGON": 161, + "DISTANCE": 162, + "INTERSECTS": 163, + "INCLUDES": 164, + "CIRCLE": 165, + "GET_INTERSECTING": 166, + "FILL": 167, + "GET_NEAREST": 168, + "POLYGON_SUB": 171, + "MINVAL": 180, + "MAXVAL": 181, +} + +func (x Term_TermType) Enum() *Term_TermType { + p := new(Term_TermType) + *p = x + return p +} +func (x Term_TermType) String() string { + return proto.EnumName(Term_TermType_name, int32(x)) +} +func (x Term_TermType) MarshalJSON() ([]byte, error) { + return json.Marshal(x.String()) +} +func (x *Term_TermType) UnmarshalJSON(data 
[]byte) error { + value, err := proto.UnmarshalJSONEnum(Term_TermType_value, data, "Term_TermType") + if err != nil { + return err + } + *x = Term_TermType(value) + return nil +} + +type VersionDummy struct { + XXX_unrecognized []byte `json:"-"` +} + +func (m *VersionDummy) Reset() { *m = VersionDummy{} } +func (m *VersionDummy) String() string { return proto.CompactTextString(m) } +func (*VersionDummy) ProtoMessage() {} + +// You send one of: +// * A [START] query with a [Term] to evaluate and a unique-per-connection token. +// * A [CONTINUE] query with the same token as a [START] query that returned +// [SUCCESS_PARTIAL] in its [Response]. +// * A [STOP] query with the same token as a [START] query that you want to stop. +// * A [NOREPLY_WAIT] query with a unique per-connection token. The server answers +// with a [WAIT_COMPLETE] [Response]. +// * A [SERVER_INFO] query. The server answers with a [SERVER_INFO] [Response]. +type Query struct { + Type *Query_QueryType `protobuf:"varint,1,opt,name=type,enum=Query_QueryType" json:"type,omitempty"` + // A [Term] is how we represent the operations we want a query to perform. + Query *Term `protobuf:"bytes,2,opt,name=query" json:"query,omitempty"` + Token *int64 `protobuf:"varint,3,opt,name=token" json:"token,omitempty"` + // This flag is ignored on the server. `noreply` should be added + // to `global_optargs` instead (the key "noreply" should map to + // either true or false). + OBSOLETENoreply *bool `protobuf:"varint,4,opt,name=OBSOLETE_noreply,def=0" json:"OBSOLETE_noreply,omitempty"` + // If this is set to [true], then [Datum] values will sometimes be + // of [DatumType] [R_JSON] (see below). This can provide enormous + // speedups in languages with poor protobuf libraries. 
+ AcceptsRJson *bool `protobuf:"varint,5,opt,name=accepts_r_json,def=0" json:"accepts_r_json,omitempty"` + GlobalOptargs []*Query_AssocPair `protobuf:"bytes,6,rep,name=global_optargs" json:"global_optargs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query) Reset() { *m = Query{} } +func (m *Query) String() string { return proto.CompactTextString(m) } +func (*Query) ProtoMessage() {} + +const Default_Query_OBSOLETENoreply bool = false +const Default_Query_AcceptsRJson bool = false + +func (m *Query) GetType() Query_QueryType { + if m != nil && m.Type != nil { + return *m.Type + } + return Query_START +} + +func (m *Query) GetQuery() *Term { + if m != nil { + return m.Query + } + return nil +} + +func (m *Query) GetToken() int64 { + if m != nil && m.Token != nil { + return *m.Token + } + return 0 +} + +func (m *Query) GetOBSOLETENoreply() bool { + if m != nil && m.OBSOLETENoreply != nil { + return *m.OBSOLETENoreply + } + return Default_Query_OBSOLETENoreply +} + +func (m *Query) GetAcceptsRJson() bool { + if m != nil && m.AcceptsRJson != nil { + return *m.AcceptsRJson + } + return Default_Query_AcceptsRJson +} + +func (m *Query) GetGlobalOptargs() []*Query_AssocPair { + if m != nil { + return m.GlobalOptargs + } + return nil +} + +type Query_AssocPair struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Val *Term `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Query_AssocPair) Reset() { *m = Query_AssocPair{} } +func (m *Query_AssocPair) String() string { return proto.CompactTextString(m) } +func (*Query_AssocPair) ProtoMessage() {} + +func (m *Query_AssocPair) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Query_AssocPair) GetVal() *Term { + if m != nil { + return m.Val + } + return nil +} + +// A backtrace frame (see `backtrace` in Response below) +type Frame struct { + Type *Frame_FrameType 
`protobuf:"varint,1,opt,name=type,enum=Frame_FrameType" json:"type,omitempty"` + Pos *int64 `protobuf:"varint,2,opt,name=pos" json:"pos,omitempty"` + Opt *string `protobuf:"bytes,3,opt,name=opt" json:"opt,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Frame) Reset() { *m = Frame{} } +func (m *Frame) String() string { return proto.CompactTextString(m) } +func (*Frame) ProtoMessage() {} + +func (m *Frame) GetType() Frame_FrameType { + if m != nil && m.Type != nil { + return *m.Type + } + return Frame_POS +} + +func (m *Frame) GetPos() int64 { + if m != nil && m.Pos != nil { + return *m.Pos + } + return 0 +} + +func (m *Frame) GetOpt() string { + if m != nil && m.Opt != nil { + return *m.Opt + } + return "" +} + +type Backtrace struct { + Frames []*Frame `protobuf:"bytes,1,rep,name=frames" json:"frames,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Backtrace) Reset() { *m = Backtrace{} } +func (m *Backtrace) String() string { return proto.CompactTextString(m) } +func (*Backtrace) ProtoMessage() {} + +func (m *Backtrace) GetFrames() []*Frame { + if m != nil { + return m.Frames + } + return nil +} + +// You get back a response with the same [token] as your query. +type Response struct { + Type *Response_ResponseType `protobuf:"varint,1,opt,name=type,enum=Response_ResponseType" json:"type,omitempty"` + ErrorType *Response_ErrorType `protobuf:"varint,7,opt,name=error_type,enum=Response_ErrorType" json:"error_type,omitempty"` + Notes []Response_ResponseNote `protobuf:"varint,6,rep,name=notes,enum=Response_ResponseNote" json:"notes,omitempty"` + Token *int64 `protobuf:"varint,2,opt,name=token" json:"token,omitempty"` + // [response] contains 1 RQL datum if [type] is [SUCCESS_ATOM] or + // [SERVER_INFO]. [response] contains many RQL data if [type] is + // [SUCCESS_SEQUENCE] or [SUCCESS_PARTIAL]. [response] contains 1 + // error message (of type [R_STR]) in all other cases. 
+ Response []*Datum `protobuf:"bytes,3,rep,name=response" json:"response,omitempty"` + // If [type] is [CLIENT_ERROR], [TYPE_ERROR], or [RUNTIME_ERROR], then a + // backtrace will be provided. The backtrace says where in the query the + // error occurred. Ideally this information will be presented to the user as + // a pretty-printed version of their query with the erroneous section + // underlined. A backtrace is a series of 0 or more [Frame]s, each of which + // specifies either the index of a positional argument or the name of an + // optional argument. (Those words will make more sense if you look at the + // [Term] message below.) + Backtrace *Backtrace `protobuf:"bytes,4,opt,name=backtrace" json:"backtrace,omitempty"` + // If the [global_optargs] in the [Query] that this [Response] is a + // response to contains a key "profile" which maps to a static value of + // true then [profile] will contain a [Datum] which provides profiling + // information about the execution of the query. This field should be + // returned to the user along with the result that would normally be + // returned (a datum or a cursor). In official drivers this is accomplished + // by putting them inside of an object with "value" mapping to the return + // value and "profile" mapping to the profile object. 
+ Profile *Datum `protobuf:"bytes,5,opt,name=profile" json:"profile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Response) Reset() { *m = Response{} } +func (m *Response) String() string { return proto.CompactTextString(m) } +func (*Response) ProtoMessage() {} + +func (m *Response) GetType() Response_ResponseType { + if m != nil && m.Type != nil { + return *m.Type + } + return Response_SUCCESS_ATOM +} + +func (m *Response) GetErrorType() Response_ErrorType { + if m != nil && m.ErrorType != nil { + return *m.ErrorType + } + return Response_INTERNAL +} + +func (m *Response) GetNotes() []Response_ResponseNote { + if m != nil { + return m.Notes + } + return nil +} + +func (m *Response) GetToken() int64 { + if m != nil && m.Token != nil { + return *m.Token + } + return 0 +} + +func (m *Response) GetResponse() []*Datum { + if m != nil { + return m.Response + } + return nil +} + +func (m *Response) GetBacktrace() *Backtrace { + if m != nil { + return m.Backtrace + } + return nil +} + +func (m *Response) GetProfile() *Datum { + if m != nil { + return m.Profile + } + return nil +} + +// A [Datum] is a chunk of data that can be serialized to disk or returned to +// the user in a Response. Currently we only support JSON types, but we may +// support other types in the future (e.g., a date type or an integer type). 
+type Datum struct { + Type *Datum_DatumType `protobuf:"varint,1,opt,name=type,enum=Datum_DatumType" json:"type,omitempty"` + RBool *bool `protobuf:"varint,2,opt,name=r_bool" json:"r_bool,omitempty"` + RNum *float64 `protobuf:"fixed64,3,opt,name=r_num" json:"r_num,omitempty"` + RStr *string `protobuf:"bytes,4,opt,name=r_str" json:"r_str,omitempty"` + RArray []*Datum `protobuf:"bytes,5,rep,name=r_array" json:"r_array,omitempty"` + RObject []*Datum_AssocPair `protobuf:"bytes,6,rep,name=r_object" json:"r_object,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Datum) Reset() { *m = Datum{} } +func (m *Datum) String() string { return proto.CompactTextString(m) } +func (*Datum) ProtoMessage() {} + +func (m *Datum) GetType() Datum_DatumType { + if m != nil && m.Type != nil { + return *m.Type + } + return Datum_R_NULL +} + +func (m *Datum) GetRBool() bool { + if m != nil && m.RBool != nil { + return *m.RBool + } + return false +} + +func (m *Datum) GetRNum() float64 { + if m != nil && m.RNum != nil { + return *m.RNum + } + return 0 +} + +func (m *Datum) GetRStr() string { + if m != nil && m.RStr != nil { + return *m.RStr + } + return "" +} + +func (m *Datum) GetRArray() []*Datum { + if m != nil { + return m.RArray + } + return nil +} + +func (m *Datum) GetRObject() []*Datum_AssocPair { + if m != nil { + return m.RObject + } + return nil +} + +type Datum_AssocPair struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Val *Datum `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Datum_AssocPair) Reset() { *m = Datum_AssocPair{} } +func (m *Datum_AssocPair) String() string { return proto.CompactTextString(m) } +func (*Datum_AssocPair) ProtoMessage() {} + +func (m *Datum_AssocPair) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Datum_AssocPair) GetVal() *Datum { + if m != nil { + return m.Val + } + return nil +} + +// A [Term] is 
either a piece of data (see **Datum** above), or an operator and +// its operands. If you have a [Datum], it's stored in the member [datum]. If +// you have an operator, its positional arguments are stored in [args] and its +// optional arguments are stored in [optargs]. +// +// A note about type signatures: +// We use the following notation to denote types: +// arg1_type, arg2_type, argrest_type... -> result_type +// So, for example, if we have a function `avg` that takes any number of +// arguments and averages them, we might write: +// NUMBER... -> NUMBER +// Or if we had a function that took one number modulo another: +// NUMBER, NUMBER -> NUMBER +// Or a function that takes a table and a primary key of any Datum type, then +// retrieves the entry with that primary key: +// Table, DATUM -> OBJECT +// Some arguments must be provided as literal values (and not the results of sub +// terms). These are marked with a `!`. +// Optional arguments are specified within curly braces as argname `:` value +// type (e.x `{noreply:BOOL}`) +// Many RQL operations are polymorphic. For these, alterantive type signatures +// are separated by `|`. +// +// The RQL type hierarchy is as follows: +// Top +// DATUM +// NULL +// BOOL +// NUMBER +// STRING +// OBJECT +// SingleSelection +// ARRAY +// Sequence +// ARRAY +// Stream +// StreamSelection +// Table +// Database +// Function +// Ordering - used only by ORDER_BY +// Pathspec -- an object, string, or array that specifies a path +// Error +type Term struct { + Type *Term_TermType `protobuf:"varint,1,opt,name=type,enum=Term_TermType" json:"type,omitempty"` + // This is only used when type is DATUM. 
+ Datum *Datum `protobuf:"bytes,2,opt,name=datum" json:"datum,omitempty"` + Args []*Term `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` + Optargs []*Term_AssocPair `protobuf:"bytes,4,rep,name=optargs" json:"optargs,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Term) Reset() { *m = Term{} } +func (m *Term) String() string { return proto.CompactTextString(m) } +func (*Term) ProtoMessage() {} + +func (m *Term) GetType() Term_TermType { + if m != nil && m.Type != nil { + return *m.Type + } + return Term_DATUM +} + +func (m *Term) GetDatum() *Datum { + if m != nil { + return m.Datum + } + return nil +} + +func (m *Term) GetArgs() []*Term { + if m != nil { + return m.Args + } + return nil +} + +func (m *Term) GetOptargs() []*Term_AssocPair { + if m != nil { + return m.Optargs + } + return nil +} + +type Term_AssocPair struct { + Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"` + Val *Term `protobuf:"bytes,2,opt,name=val" json:"val,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Term_AssocPair) Reset() { *m = Term_AssocPair{} } +func (m *Term_AssocPair) String() string { return proto.CompactTextString(m) } +func (*Term_AssocPair) ProtoMessage() {} + +func (m *Term_AssocPair) GetKey() string { + if m != nil && m.Key != nil { + return *m.Key + } + return "" +} + +func (m *Term_AssocPair) GetVal() *Term { + if m != nil { + return m.Val + } + return nil +} + +func init() { + proto.RegisterEnum("VersionDummy_Version", VersionDummy_Version_name, VersionDummy_Version_value) + proto.RegisterEnum("VersionDummy_Protocol", VersionDummy_Protocol_name, VersionDummy_Protocol_value) + proto.RegisterEnum("Query_QueryType", Query_QueryType_name, Query_QueryType_value) + proto.RegisterEnum("Frame_FrameType", Frame_FrameType_name, Frame_FrameType_value) + proto.RegisterEnum("Response_ResponseType", Response_ResponseType_name, Response_ResponseType_value) + proto.RegisterEnum("Response_ErrorType", Response_ErrorType_name, 
Response_ErrorType_value) + proto.RegisterEnum("Response_ResponseNote", Response_ResponseNote_name, Response_ResponseNote_value) + proto.RegisterEnum("Datum_DatumType", Datum_DatumType_name, Datum_DatumType_value) + proto.RegisterEnum("Term_TermType", Term_TermType_name, Term_TermType_value) +} diff --git a/vendor/gopkg.in/gorethink/gorethink.v2/types/geometry.go b/vendor/gopkg.in/gorethink/gorethink.v2/types/geometry.go new file mode 100644 index 0000000..00ff80f --- /dev/null +++ b/vendor/gopkg.in/gorethink/gorethink.v2/types/geometry.go @@ -0,0 +1,225 @@ +package types + +import ( + "fmt" +) + +type Geometry struct { + Type string + Point Point + Line Line + Lines Lines +} + +func (g Geometry) MarshalRQL() (interface{}, error) { + switch g.Type { + case "Point": + return g.Point.MarshalRQL() + case "LineString": + return g.Line.MarshalRQL() + case "Polygon": + return g.Lines.MarshalRQL() + default: + return nil, fmt.Errorf("pseudo-type GEOMETRY object field 'type' %s is not valid", g.Type) + } +} + +func (g *Geometry) UnmarshalRQL(data interface{}) error { + if data, ok := data.(Geometry); ok { + g.Type = data.Type + g.Point = data.Point + g.Line = data.Line + g.Lines = data.Lines + + return nil + } + + m, ok := data.(map[string]interface{}) + if !ok { + return fmt.Errorf("pseudo-type GEOMETRY object is not valid") + } + + typ, ok := m["type"] + if !ok { + return fmt.Errorf("pseudo-type GEOMETRY object is not valid, expects 'type' field") + } + coords, ok := m["coordinates"] + if !ok { + return fmt.Errorf("pseudo-type GEOMETRY object is not valid, expects 'coordinates' field") + } + + var err error + switch typ { + case "Point": + g.Type = "Point" + g.Point, err = UnmarshalPoint(coords) + case "LineString": + g.Type = "LineString" + g.Line, err = UnmarshalLineString(coords) + case "Polygon": + g.Type = "Polygon" + g.Lines, err = UnmarshalPolygon(coords) + default: + return fmt.Errorf("pseudo-type GEOMETRY object has invalid type") + } + + if err != nil { + 
return err + } + + return nil +} + +type Point struct { + Lon float64 + Lat float64 +} +type Line []Point +type Lines []Line + +func (p Point) Coords() interface{} { + return []interface{}{p.Lon, p.Lat} +} + +func (p Point) MarshalRQL() (interface{}, error) { + return map[string]interface{}{ + "$reql_type$": "GEOMETRY", + "coordinates": p.Coords(), + "type": "Point", + }, nil +} + +func (p *Point) UnmarshalRQL(data interface{}) error { + g := &Geometry{} + err := g.UnmarshalRQL(data) + if err != nil { + return err + } + if g.Type != "Point" { + return fmt.Errorf("pseudo-type GEOMETRY object has type %s, expected type %s", g.Type, "Point") + } + + p.Lat = g.Point.Lat + p.Lon = g.Point.Lon + + return nil +} + +func (l Line) Coords() interface{} { + coords := make([]interface{}, len(l)) + for i, point := range l { + coords[i] = point.Coords() + } + return coords +} + +func (l Line) MarshalRQL() (interface{}, error) { + return map[string]interface{}{ + "$reql_type$": "GEOMETRY", + "coordinates": l.Coords(), + "type": "LineString", + }, nil +} + +func (l *Line) UnmarshalRQL(data interface{}) error { + g := &Geometry{} + err := g.UnmarshalRQL(data) + if err != nil { + return err + } + if g.Type != "LineString" { + return fmt.Errorf("pseudo-type GEOMETRY object has type %s, expected type %s", g.Type, "LineString") + } + + *l = g.Line + + return nil +} + +func (l Lines) Coords() interface{} { + coords := make([]interface{}, len(l)) + for i, line := range l { + coords[i] = line.Coords() + } + return coords +} + +func (l Lines) MarshalRQL() (interface{}, error) { + return map[string]interface{}{ + "$reql_type$": "GEOMETRY", + "coordinates": l.Coords(), + "type": "Polygon", + }, nil +} + +func (l *Lines) UnmarshalRQL(data interface{}) error { + g := &Geometry{} + err := g.UnmarshalRQL(data) + if err != nil { + return err + } + if g.Type != "Polygon" { + return fmt.Errorf("pseudo-type GEOMETRY object has type %s, expected type %s", g.Type, "Polygon") + } + + *l = g.Lines + + 
return nil +} + +func UnmarshalPoint(v interface{}) (Point, error) { + coords, ok := v.([]interface{}) + if !ok { + return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + if len(coords) != 2 { + return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + lon, ok := coords[0].(float64) + if !ok { + return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + lat, ok := coords[1].(float64) + if !ok { + return Point{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + + return Point{ + Lon: lon, + Lat: lat, + }, nil +} + +func UnmarshalLineString(v interface{}) (Line, error) { + points, ok := v.([]interface{}) + if !ok { + return Line{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + + var err error + line := make(Line, len(points)) + for i, coords := range points { + line[i], err = UnmarshalPoint(coords) + if err != nil { + return Line{}, err + } + } + return line, nil +} + +func UnmarshalPolygon(v interface{}) (Lines, error) { + lines, ok := v.([]interface{}) + if !ok { + return Lines{}, fmt.Errorf("pseudo-type GEOMETRY object field 'coordinates' is not valid") + } + + var err error + polygon := make(Lines, len(lines)) + for i, line := range lines { + polygon[i], err = UnmarshalLineString(line) + if err != nil { + return Lines{}, err + } + } + return polygon, nil +}