From 931404890e3eff53cd8907c005ba4e02bd0feb1a Mon Sep 17 00:00:00 2001 From: Christine Dodrill Date: Fri, 25 Aug 2017 01:22:58 -0700 Subject: [PATCH] initial commit --- .gitignore | 2 + bot/bot.go | 198 ++ bot/doc.go | 10 + bot/help.go | 29 + box.rb | 32 + config.nims | 7 + docker-compose.yml | 15 + vendor-log | 41 + .../Shopify/sarama/api_versions_request.go | 24 + .../Shopify/sarama/api_versions_response.go | 87 + .../Shopify/sarama/async_producer.go | 904 ++++++++ vendor/github.com/Shopify/sarama/broker.go | 685 ++++++ vendor/github.com/Shopify/sarama/client.go | 791 +++++++ vendor/github.com/Shopify/sarama/config.go | 430 ++++ vendor/github.com/Shopify/sarama/consumer.go | 747 +++++++ .../Shopify/sarama/consumer_group_members.go | 94 + .../sarama/consumer_metadata_request.go | 26 + .../sarama/consumer_metadata_response.go | 85 + .../github.com/Shopify/sarama/crc32_field.go | 35 + .../Shopify/sarama/describe_groups_request.go | 30 + .../sarama/describe_groups_response.go | 187 ++ .../Shopify/sarama/encoder_decoder.go | 89 + vendor/github.com/Shopify/sarama/errors.go | 221 ++ .../Shopify/sarama/fetch_request.go | 150 ++ .../Shopify/sarama/fetch_response.go | 210 ++ .../Shopify/sarama/heartbeat_request.go | 47 + .../Shopify/sarama/heartbeat_response.go | 32 + .../Shopify/sarama/join_group_request.go | 143 ++ .../Shopify/sarama/join_group_response.go | 115 + .../Shopify/sarama/leave_group_request.go | 40 + .../Shopify/sarama/leave_group_response.go | 32 + .../github.com/Shopify/sarama/length_field.go | 29 + .../Shopify/sarama/list_groups_request.go | 24 + .../Shopify/sarama/list_groups_response.go | 69 + vendor/github.com/Shopify/sarama/message.go | 212 ++ .../github.com/Shopify/sarama/message_set.go | 89 + .../Shopify/sarama/metadata_request.go | 52 + .../Shopify/sarama/metadata_response.go | 239 ++ vendor/github.com/Shopify/sarama/metrics.go | 51 + .../github.com/Shopify/sarama/mockbroker.go | 324 +++ .../Shopify/sarama/mockresponses.go | 455 ++++ .../Shopify/sarama/offset_commit_request.go | 190 ++ .../Shopify/sarama/offset_commit_response.go | 85 + .../Shopify/sarama/offset_fetch_request.go | 81 + .../Shopify/sarama/offset_fetch_response.go | 143 ++ .../Shopify/sarama/offset_manager.go | 542 +++++ .../Shopify/sarama/offset_request.go | 132 ++ .../Shopify/sarama/offset_response.go | 174 ++ .../Shopify/sarama/packet_decoder.go | 45 + .../Shopify/sarama/packet_encoder.go | 50 + .../github.com/Shopify/sarama/partitioner.go | 135 ++ .../github.com/Shopify/sarama/prep_encoder.go | 121 + .../Shopify/sarama/produce_request.go | 209 ++ .../Shopify/sarama/produce_response.go | 159 ++ .../github.com/Shopify/sarama/produce_set.go | 176 ++ .../github.com/Shopify/sarama/real_decoder.go | 260 +++ .../github.com/Shopify/sarama/real_encoder.go | 129 ++ vendor/github.com/Shopify/sarama/request.go | 119 + .../Shopify/sarama/response_header.go | 21 + vendor/github.com/Shopify/sarama/sarama.go | 99 + .../Shopify/sarama/sasl_handshake_request.go | 33 + .../Shopify/sarama/sasl_handshake_response.go | 38 + .../Shopify/sarama/sync_group_request.go | 100 + .../Shopify/sarama/sync_group_response.go | 41 + .../Shopify/sarama/sync_producer.go | 164 ++ vendor/github.com/Shopify/sarama/utils.go | 150 ++ vendor/github.com/Xe/ln/doc.go | 25 + vendor/github.com/Xe/ln/filter.go | 67 + vendor/github.com/Xe/ln/formatter.go | 111 + vendor/github.com/Xe/ln/logger.go | 175 ++ vendor/github.com/Xe/ln/stack.go | 44 + .../lib/go/thrift/application_exception.go | 142 ++ .../thrift/lib/go/thrift/binary_protocol.go | 514 +++++ 
.../lib/go/thrift/buffered_transport.go | 91 + .../thrift/lib/go/thrift/common_test_go17.go | 32 + .../lib/go/thrift/common_test_pre_go17.go | 32 + .../thrift/lib/go/thrift/compact_protocol.go | 815 +++++++ .../thrift/lib/go/thrift/debug_protocol.go | 269 +++ .../thrift/lib/go/thrift/deserializer.go | 58 + .../apache/thrift/lib/go/thrift/exception.go | 44 + .../apache/thrift/lib/go/thrift/field.go | 79 + .../thrift/lib/go/thrift/framed_transport.go | 172 ++ .../apache/thrift/lib/go/thrift/go17.go | 26 + .../thrift/lib/go/thrift/http_client.go | 238 ++ .../thrift/lib/go/thrift/http_transport.go | 51 + .../lib/go/thrift/http_transport_go17.go | 38 + .../lib/go/thrift/http_transport_pre_go17.go | 40 + .../lib/go/thrift/iostream_transport.go | 213 ++ .../thrift/lib/go/thrift/json_protocol.go | 583 +++++ .../thrift/lib/go/thrift/memory_buffer.go | 79 + .../thrift/lib/go/thrift/messagetype.go | 31 + .../lib/go/thrift/multiplexed_protocol.go | 139 ++ .../go/thrift/multiplexed_protocol_go17.go | 53 + .../thrift/multiplexed_protocol_pre_go17.go | 54 + .../apache/thrift/lib/go/thrift/numeric.go | 164 ++ .../apache/thrift/lib/go/thrift/pointerize.go | 50 + .../apache/thrift/lib/go/thrift/pre_go17.go | 26 + .../apache/thrift/lib/go/thrift/processor.go | 34 + .../thrift/lib/go/thrift/processor_factory.go | 58 + .../thrift/lib/go/thrift/processor_go17.go | 34 + .../apache/thrift/lib/go/thrift/protocol.go | 178 ++ .../lib/go/thrift/protocol_exception.go | 77 + .../thrift/lib/go/thrift/protocol_factory.go | 25 + .../thrift/lib/go/thrift/rich_transport.go | 68 + .../apache/thrift/lib/go/thrift/serializer.go | 75 + .../apache/thrift/lib/go/thrift/server.go | 35 + .../thrift/lib/go/thrift/server_socket.go | 134 ++ .../thrift/lib/go/thrift/server_transport.go | 34 + .../lib/go/thrift/simple_json_protocol.go | 1337 +++++++++++ .../thrift/lib/go/thrift/simple_server.go | 215 ++ .../apache/thrift/lib/go/thrift/socket.go | 165 ++ .../thrift/lib/go/thrift/ssl_server_socket.go | 112 + .../apache/thrift/lib/go/thrift/ssl_socket.go | 173 ++ .../apache/thrift/lib/go/thrift/transport.go | 65 + .../lib/go/thrift/transport_exception.go | 90 + .../thrift/lib/go/thrift/transport_factory.go | 39 + .../apache/thrift/lib/go/thrift/type.go | 69 + .../thrift/lib/go/thrift/zlib_transport.go | 116 + .../github.com/bwmarrin/discordgo/discord.go | 145 ++ .../bwmarrin/discordgo/endpoints.go | 127 ++ vendor/github.com/bwmarrin/discordgo/event.go | 230 ++ .../bwmarrin/discordgo/eventhandlers.go | 1030 +++++++++ .../github.com/bwmarrin/discordgo/events.go | 253 +++ .../github.com/bwmarrin/discordgo/logging.go | 95 + .../github.com/bwmarrin/discordgo/message.go | 180 ++ .../github.com/bwmarrin/discordgo/oauth2.go | 126 ++ .../bwmarrin/discordgo/ratelimit.go | 144 ++ .../github.com/bwmarrin/discordgo/restapi.go | 1983 +++++++++++++++++ vendor/github.com/bwmarrin/discordgo/state.go | 949 ++++++++ .../github.com/bwmarrin/discordgo/structs.go | 581 +++++ vendor/github.com/bwmarrin/discordgo/types.go | 58 + vendor/github.com/bwmarrin/discordgo/user.go | 26 + vendor/github.com/bwmarrin/discordgo/voice.go | 878 ++++++++ vendor/github.com/bwmarrin/discordgo/wsapi.go | 750 +++++++ .../github.com/davecgh/go-spew/spew/bypass.go | 152 ++ .../davecgh/go-spew/spew/bypasssafe.go | 38 + .../github.com/davecgh/go-spew/spew/common.go | 341 +++ .../github.com/davecgh/go-spew/spew/config.go | 306 +++ vendor/github.com/davecgh/go-spew/spew/doc.go | 211 ++ .../github.com/davecgh/go-spew/spew/dump.go | 509 +++++ .../davecgh/go-spew/spew/dumpcgo_test.go | 99 + 
.../github.com/davecgh/go-spew/spew/format.go | 419 ++++ .../github.com/davecgh/go-spew/spew/spew.go | 148 ++ .../eapache/go-resiliency/breaker/breaker.go | 161 ++ .../eapache/go-xerial-snappy/snappy.go | 43 + vendor/github.com/eapache/queue/queue.go | 102 + vendor/github.com/go-logfmt/logfmt/decode.go | 237 ++ vendor/github.com/go-logfmt/logfmt/doc.go | 6 + vendor/github.com/go-logfmt/logfmt/encode.go | 321 +++ vendor/github.com/go-logfmt/logfmt/fuzz.go | 126 ++ .../github.com/go-logfmt/logfmt/jsonstring.go | 277 +++ .../github.com/gogo/protobuf/proto/clone.go | 234 ++ .../github.com/gogo/protobuf/proto/decode.go | 978 ++++++++ .../gogo/protobuf/proto/decode_gogo.go | 172 ++ .../gogo/protobuf/proto/duration.go | 100 + .../gogo/protobuf/proto/duration_gogo.go | 203 ++ .../github.com/gogo/protobuf/proto/encode.go | 1362 +++++++++++ .../gogo/protobuf/proto/encode_gogo.go | 350 +++ .../github.com/gogo/protobuf/proto/equal.go | 300 +++ .../gogo/protobuf/proto/extensions.go | 693 ++++++ .../gogo/protobuf/proto/extensions_gogo.go | 294 +++ vendor/github.com/gogo/protobuf/proto/lib.go | 897 ++++++++ .../gogo/protobuf/proto/lib_gogo.go | 42 + .../gogo/protobuf/proto/message_set.go | 311 +++ .../gogo/protobuf/proto/pointer_reflect.go | 484 ++++ .../protobuf/proto/pointer_reflect_gogo.go | 85 + .../gogo/protobuf/proto/pointer_unsafe.go | 270 +++ .../protobuf/proto/pointer_unsafe_gogo.go | 128 ++ .../gogo/protobuf/proto/properties.go | 968 ++++++++ .../gogo/protobuf/proto/properties_gogo.go | 111 + .../gogo/protobuf/proto/skip_gogo.go | 119 + vendor/github.com/gogo/protobuf/proto/text.go | 928 ++++++++ .../gogo/protobuf/proto/text_gogo.go | 57 + .../gogo/protobuf/proto/text_parser.go | 1013 +++++++++ .../gogo/protobuf/proto/timestamp.go | 113 + .../gogo/protobuf/proto/timestamp_gogo.go | 229 ++ vendor/github.com/golang/snappy/decode.go | 237 ++ .../github.com/golang/snappy/decode_amd64.go | 14 + .../github.com/golang/snappy/decode_amd64.s | 490 ++++ .../github.com/golang/snappy/decode_other.go | 101 + vendor/github.com/golang/snappy/encode.go | 285 +++ .../github.com/golang/snappy/encode_amd64.go | 29 + .../github.com/golang/snappy/encode_amd64.s | 730 ++++++ .../github.com/golang/snappy/encode_other.go | 238 ++ vendor/github.com/golang/snappy/snappy.go | 87 + vendor/github.com/google/gops/agent/agent.go | 237 ++ .../google/gops/internal/internal.go | 52 + .../github.com/google/gops/signal/signal.go | 35 + vendor/github.com/gorilla/websocket/client.go | 392 ++++ .../gorilla/websocket/client_clone.go | 16 + .../gorilla/websocket/client_clone_legacy.go | 38 + .../gorilla/websocket/compression.go | 148 ++ vendor/github.com/gorilla/websocket/conn.go | 1149 ++++++++++ .../github.com/gorilla/websocket/conn_read.go | 18 + .../gorilla/websocket/conn_read_legacy.go | 21 + vendor/github.com/gorilla/websocket/doc.go | 179 ++ vendor/github.com/gorilla/websocket/json.go | 60 + vendor/github.com/gorilla/websocket/mask.go | 55 + .../github.com/gorilla/websocket/mask_safe.go | 15 + .../github.com/gorilla/websocket/prepared.go | 103 + vendor/github.com/gorilla/websocket/server.go | 292 +++ vendor/github.com/gorilla/websocket/util.go | 214 ++ .../joho/godotenv/autoload/autoload.go | 15 + vendor/github.com/joho/godotenv/godotenv.go | 253 +++ vendor/github.com/justinian/dice/dice.go | 55 + vendor/github.com/justinian/dice/eote.go | 168 ++ vendor/github.com/justinian/dice/fudge.go | 63 + vendor/github.com/justinian/dice/std.go | 105 + vendor/github.com/justinian/dice/versus.go | 92 + 
vendor/github.com/kardianos/osext/osext.go | 33 + .../github.com/kardianos/osext/osext_go18.go | 9 + .../github.com/kardianos/osext/osext_plan9.go | 22 + .../kardianos/osext/osext_procfs.go | 36 + .../kardianos/osext/osext_sysctl.go | 126 ++ .../kardianos/osext/osext_windows.go | 36 + vendor/github.com/namsral/flag/extras.go | 185 ++ vendor/github.com/namsral/flag/flag.go | 1015 +++++++++ vendor/github.com/nishanths/go-xkcd/client.go | 62 + vendor/github.com/nishanths/go-xkcd/comics.go | 153 ++ vendor/github.com/nishanths/go-xkcd/config.go | 7 + vendor/github.com/nishanths/go-xkcd/doc.go | 21 + vendor/github.com/nishanths/go-xkcd/errors.go | 25 + vendor/github.com/nishanths/go-xkcd/random.go | 32 + .../go-observer/observer.go | 39 + .../opentracing/opentracing-go/ext/tags.go | 210 ++ .../opentracing-go/globaltracer.go | 32 + .../opentracing/opentracing-go/gocontext.go | 57 + .../opentracing/opentracing-go/log/field.go | 269 +++ .../opentracing/opentracing-go/log/util.go | 54 + .../opentracing/opentracing-go/noop.go | 64 + .../opentracing/opentracing-go/propagation.go | 176 ++ .../opentracing/opentracing-go/span.go | 185 ++ .../opentracing/opentracing-go/tracer.go | 305 +++ .../zipkin-go-opentracing/collector-http.go | 234 ++ .../zipkin-go-opentracing/collector-kafka.go | 95 + .../zipkin-go-opentracing/collector-scribe.go | 235 ++ .../zipkin-go-opentracing/collector.go | 77 + .../zipkin-go-opentracing/context.go | 61 + .../openzipkin/zipkin-go-opentracing/debug.go | 78 + .../openzipkin/zipkin-go-opentracing/event.go | 62 + .../zipkin-go-opentracing/flag/flags.go | 39 + .../log-materializers.go | 113 + .../zipkin-go-opentracing/logger.go | 64 + .../zipkin-go-opentracing/observer.go | 52 + .../zipkin-go-opentracing/propagation.go | 68 + .../zipkin-go-opentracing/propagation_ot.go | 252 +++ .../openzipkin/zipkin-go-opentracing/raw.go | 30 + .../zipkin-go-opentracing/recorder.go | 60 + .../zipkin-go-opentracing/sample.go | 99 + .../openzipkin/zipkin-go-opentracing/span.go | 290 +++ .../gen-go/scribe/GoUnusedProtection__.go | 7 + .../thrift/gen-go/scribe/scribe-consts.go | 24 + .../thrift/gen-go/scribe/scribe.go | 617 +++++ .../gen-go/zipkincore/GoUnusedProtection__.go | 7 + .../gen-go/zipkincore/zipkinCore-consts.go | 45 + .../thrift/gen-go/zipkincore/zipkinCore.go | 1285 +++++++++++ .../zipkin-go-opentracing/tracer.go | 440 ++++ .../zipkin-go-opentracing/types/traceid.go | 40 + .../openzipkin/zipkin-go-opentracing/util.go | 25 + .../zipkin-go-opentracing/wire/carrier.go | 65 + .../zipkin-go-opentracing/wire/gen.go | 6 + .../zipkin-go-opentracing/wire/wire.pb.go | 647 ++++++ .../zipkin-go-opentracing/zipkin-endpoint.go | 72 + .../zipkin-go-opentracing/zipkin-recorder.go | 287 +++ vendor/github.com/pierrec/lz4/block.go | 445 ++++ vendor/github.com/pierrec/lz4/lz4.go | 105 + vendor/github.com/pierrec/lz4/reader.go | 364 +++ vendor/github.com/pierrec/lz4/writer.go | 377 ++++ .../pierrec/xxHash/xxHash32/xxHash32.go | 212 ++ vendor/github.com/pkg/errors/errors.go | 269 +++ vendor/github.com/pkg/errors/stack.go | 186 ++ .../github.com/rcrowley/go-metrics/counter.go | 112 + .../github.com/rcrowley/go-metrics/debug.go | 76 + vendor/github.com/rcrowley/go-metrics/ewma.go | 118 + .../github.com/rcrowley/go-metrics/gauge.go | 120 + .../rcrowley/go-metrics/gauge_float64.go | 127 ++ .../rcrowley/go-metrics/graphite.go | 113 + .../rcrowley/go-metrics/healthcheck.go | 61 + .../rcrowley/go-metrics/histogram.go | 202 ++ vendor/github.com/rcrowley/go-metrics/json.go | 87 + 
vendor/github.com/rcrowley/go-metrics/log.go | 80 + .../github.com/rcrowley/go-metrics/meter.go | 233 ++ .../github.com/rcrowley/go-metrics/metrics.go | 13 + .../rcrowley/go-metrics/opentsdb.go | 119 + .../rcrowley/go-metrics/registry.go | 270 +++ .../github.com/rcrowley/go-metrics/runtime.go | 212 ++ .../rcrowley/go-metrics/runtime_cgo.go | 10 + .../go-metrics/runtime_gccpufraction.go | 9 + .../rcrowley/go-metrics/runtime_no_cgo.go | 7 + .../go-metrics/runtime_no_gccpufraction.go | 9 + .../github.com/rcrowley/go-metrics/sample.go | 616 +++++ .../github.com/rcrowley/go-metrics/syslog.go | 78 + .../github.com/rcrowley/go-metrics/timer.go | 311 +++ .../github.com/rcrowley/go-metrics/writer.go | 100 + .../github.com/robfig/cron/constantdelay.go | 27 + vendor/github.com/robfig/cron/cron.go | 259 +++ vendor/github.com/robfig/cron/doc.go | 129 ++ vendor/github.com/robfig/cron/parser.go | 380 ++++ vendor/github.com/robfig/cron/spec.go | 158 ++ .../x/crypto/nacl/secretbox/secretbox.go | 149 ++ .../golang.org/x/crypto/poly1305/poly1305.go | 33 + .../golang.org/x/crypto/poly1305/sum_amd64.go | 22 + .../golang.org/x/crypto/poly1305/sum_amd64.s | 125 ++ .../golang.org/x/crypto/poly1305/sum_arm.go | 22 + .../golang.org/x/crypto/poly1305/sum_ref.go | 141 ++ .../x/crypto/salsa20/salsa/hsalsa20.go | 144 ++ .../x/crypto/salsa20/salsa/salsa2020_amd64.s | 889 ++++++++ .../x/crypto/salsa20/salsa/salsa208.go | 199 ++ .../x/crypto/salsa20/salsa/salsa20_amd64.go | 24 + .../x/crypto/salsa20/salsa/salsa20_ref.go | 234 ++ vendor/golang.org/x/net/context/context.go | 54 + .../golang.org/x/net/context/context_test.go | 583 +++++ vendor/golang.org/x/net/context/go17.go | 72 + vendor/golang.org/x/net/context/go19.go | 20 + vendor/golang.org/x/net/context/pre_go17.go | 300 +++ vendor/golang.org/x/net/context/pre_go19.go | 109 + vendor/golang.org/x/time/rate/rate.go | 371 +++ 317 files changed, 63658 insertions(+) create mode 100644 .gitignore create mode 100644 bot/bot.go create mode 100644 bot/doc.go create mode 100644 bot/help.go create mode 100644 box.rb create mode 100644 config.nims create mode 100644 docker-compose.yml create mode 100644 vendor-log create mode 100644 vendor/github.com/Shopify/sarama/api_versions_request.go create mode 100644 vendor/github.com/Shopify/sarama/api_versions_response.go create mode 100644 vendor/github.com/Shopify/sarama/async_producer.go create mode 100644 vendor/github.com/Shopify/sarama/broker.go create mode 100644 vendor/github.com/Shopify/sarama/client.go create mode 100644 vendor/github.com/Shopify/sarama/config.go create mode 100644 vendor/github.com/Shopify/sarama/consumer.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_group_members.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_request.go create mode 100644 vendor/github.com/Shopify/sarama/consumer_metadata_response.go create mode 100644 vendor/github.com/Shopify/sarama/crc32_field.go create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_request.go create mode 100644 vendor/github.com/Shopify/sarama/describe_groups_response.go create mode 100644 vendor/github.com/Shopify/sarama/encoder_decoder.go create mode 100644 vendor/github.com/Shopify/sarama/errors.go create mode 100644 vendor/github.com/Shopify/sarama/fetch_request.go create mode 100644 vendor/github.com/Shopify/sarama/fetch_response.go create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_request.go create mode 100644 vendor/github.com/Shopify/sarama/heartbeat_response.go create mode 100644 
vendor/github.com/Shopify/sarama/join_group_request.go create mode 100644 vendor/github.com/Shopify/sarama/join_group_response.go create mode 100644 vendor/github.com/Shopify/sarama/leave_group_request.go create mode 100644 vendor/github.com/Shopify/sarama/leave_group_response.go create mode 100644 vendor/github.com/Shopify/sarama/length_field.go create mode 100644 vendor/github.com/Shopify/sarama/list_groups_request.go create mode 100644 vendor/github.com/Shopify/sarama/list_groups_response.go create mode 100644 vendor/github.com/Shopify/sarama/message.go create mode 100644 vendor/github.com/Shopify/sarama/message_set.go create mode 100644 vendor/github.com/Shopify/sarama/metadata_request.go create mode 100644 vendor/github.com/Shopify/sarama/metadata_response.go create mode 100644 vendor/github.com/Shopify/sarama/metrics.go create mode 100644 vendor/github.com/Shopify/sarama/mockbroker.go create mode 100644 vendor/github.com/Shopify/sarama/mockresponses.go create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_request.go create mode 100644 vendor/github.com/Shopify/sarama/offset_commit_response.go create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_request.go create mode 100644 vendor/github.com/Shopify/sarama/offset_fetch_response.go create mode 100644 vendor/github.com/Shopify/sarama/offset_manager.go create mode 100644 vendor/github.com/Shopify/sarama/offset_request.go create mode 100644 vendor/github.com/Shopify/sarama/offset_response.go create mode 100644 vendor/github.com/Shopify/sarama/packet_decoder.go create mode 100644 vendor/github.com/Shopify/sarama/packet_encoder.go create mode 100644 vendor/github.com/Shopify/sarama/partitioner.go create mode 100644 vendor/github.com/Shopify/sarama/prep_encoder.go create mode 100644 vendor/github.com/Shopify/sarama/produce_request.go create mode 100644 vendor/github.com/Shopify/sarama/produce_response.go create mode 100644 vendor/github.com/Shopify/sarama/produce_set.go create mode 100644 vendor/github.com/Shopify/sarama/real_decoder.go create mode 100644 vendor/github.com/Shopify/sarama/real_encoder.go create mode 100644 vendor/github.com/Shopify/sarama/request.go create mode 100644 vendor/github.com/Shopify/sarama/response_header.go create mode 100644 vendor/github.com/Shopify/sarama/sarama.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_request.go create mode 100644 vendor/github.com/Shopify/sarama/sasl_handshake_response.go create mode 100644 vendor/github.com/Shopify/sarama/sync_group_request.go create mode 100644 vendor/github.com/Shopify/sarama/sync_group_response.go create mode 100644 vendor/github.com/Shopify/sarama/sync_producer.go create mode 100644 vendor/github.com/Shopify/sarama/utils.go create mode 100644 vendor/github.com/Xe/ln/doc.go create mode 100644 vendor/github.com/Xe/ln/filter.go create mode 100644 vendor/github.com/Xe/ln/formatter.go create mode 100644 vendor/github.com/Xe/ln/logger.go create mode 100644 vendor/github.com/Xe/ln/stack.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go create mode 100644 
vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/exception.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/field.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/go17.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_client.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/numeric.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/processor.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/serializer.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/socket.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/type.go create mode 100644 vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go create mode 100644 vendor/github.com/bwmarrin/discordgo/discord.go create mode 100644 vendor/github.com/bwmarrin/discordgo/endpoints.go create mode 100644 
vendor/github.com/bwmarrin/discordgo/event.go create mode 100644 vendor/github.com/bwmarrin/discordgo/eventhandlers.go create mode 100644 vendor/github.com/bwmarrin/discordgo/events.go create mode 100644 vendor/github.com/bwmarrin/discordgo/logging.go create mode 100644 vendor/github.com/bwmarrin/discordgo/message.go create mode 100644 vendor/github.com/bwmarrin/discordgo/oauth2.go create mode 100644 vendor/github.com/bwmarrin/discordgo/ratelimit.go create mode 100644 vendor/github.com/bwmarrin/discordgo/restapi.go create mode 100644 vendor/github.com/bwmarrin/discordgo/state.go create mode 100644 vendor/github.com/bwmarrin/discordgo/structs.go create mode 100644 vendor/github.com/bwmarrin/discordgo/types.go create mode 100644 vendor/github.com/bwmarrin/discordgo/user.go create mode 100644 vendor/github.com/bwmarrin/discordgo/voice.go create mode 100644 vendor/github.com/bwmarrin/discordgo/wsapi.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypass.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/bypasssafe.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/common.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/config.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/doc.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dump.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/format.go create mode 100644 vendor/github.com/davecgh/go-spew/spew/spew.go create mode 100644 vendor/github.com/eapache/go-resiliency/breaker/breaker.go create mode 100644 vendor/github.com/eapache/go-xerial-snappy/snappy.go create mode 100644 vendor/github.com/eapache/queue/queue.go create mode 100644 vendor/github.com/go-logfmt/logfmt/decode.go create mode 100644 vendor/github.com/go-logfmt/logfmt/doc.go create mode 100644 vendor/github.com/go-logfmt/logfmt/encode.go create mode 100644 vendor/github.com/go-logfmt/logfmt/fuzz.go create mode 100644 vendor/github.com/go-logfmt/logfmt/jsonstring.go create mode 100644 vendor/github.com/gogo/protobuf/proto/clone.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/equal.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions.go create mode 100644 vendor/github.com/gogo/protobuf/proto/extensions_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib.go create mode 100644 vendor/github.com/gogo/protobuf/proto/lib_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/message_set.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties.go create mode 100644 vendor/github.com/gogo/protobuf/proto/properties_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/skip_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text.go 
create mode 100644 vendor/github.com/gogo/protobuf/proto/text_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/text_parser.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 vendor/github.com/golang/snappy/decode.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.go create mode 100644 vendor/github.com/golang/snappy/decode_amd64.s create mode 100644 vendor/github.com/golang/snappy/decode_other.go create mode 100644 vendor/github.com/golang/snappy/encode.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.go create mode 100644 vendor/github.com/golang/snappy/encode_amd64.s create mode 100644 vendor/github.com/golang/snappy/encode_other.go create mode 100644 vendor/github.com/golang/snappy/snappy.go create mode 100644 vendor/github.com/google/gops/agent/agent.go create mode 100644 vendor/github.com/google/gops/internal/internal.go create mode 100644 vendor/github.com/google/gops/signal/signal.go create mode 100644 vendor/github.com/gorilla/websocket/client.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone.go create mode 100644 vendor/github.com/gorilla/websocket/client_clone_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/compression.go create mode 100644 vendor/github.com/gorilla/websocket/conn.go create mode 100644 vendor/github.com/gorilla/websocket/conn_read.go create mode 100644 vendor/github.com/gorilla/websocket/conn_read_legacy.go create mode 100644 vendor/github.com/gorilla/websocket/doc.go create mode 100644 vendor/github.com/gorilla/websocket/json.go create mode 100644 vendor/github.com/gorilla/websocket/mask.go create mode 100644 vendor/github.com/gorilla/websocket/mask_safe.go create mode 100644 vendor/github.com/gorilla/websocket/prepared.go create mode 100644 vendor/github.com/gorilla/websocket/server.go create mode 100644 vendor/github.com/gorilla/websocket/util.go create mode 100644 vendor/github.com/joho/godotenv/autoload/autoload.go create mode 100644 vendor/github.com/joho/godotenv/godotenv.go create mode 100644 vendor/github.com/justinian/dice/dice.go create mode 100644 vendor/github.com/justinian/dice/eote.go create mode 100644 vendor/github.com/justinian/dice/fudge.go create mode 100644 vendor/github.com/justinian/dice/std.go create mode 100644 vendor/github.com/justinian/dice/versus.go create mode 100644 vendor/github.com/kardianos/osext/osext.go create mode 100644 vendor/github.com/kardianos/osext/osext_go18.go create mode 100644 vendor/github.com/kardianos/osext/osext_plan9.go create mode 100644 vendor/github.com/kardianos/osext/osext_procfs.go create mode 100644 vendor/github.com/kardianos/osext/osext_sysctl.go create mode 100644 vendor/github.com/kardianos/osext/osext_windows.go create mode 100644 vendor/github.com/namsral/flag/extras.go create mode 100644 vendor/github.com/namsral/flag/flag.go create mode 100644 vendor/github.com/nishanths/go-xkcd/client.go create mode 100644 vendor/github.com/nishanths/go-xkcd/comics.go create mode 100644 vendor/github.com/nishanths/go-xkcd/config.go create mode 100644 vendor/github.com/nishanths/go-xkcd/doc.go create mode 100644 vendor/github.com/nishanths/go-xkcd/errors.go create mode 100644 vendor/github.com/nishanths/go-xkcd/random.go create mode 100644 vendor/github.com/opentracing-contrib/go-observer/observer.go create mode 100644 vendor/github.com/opentracing/opentracing-go/ext/tags.go create mode 100644 
vendor/github.com/opentracing/opentracing-go/globaltracer.go create mode 100644 vendor/github.com/opentracing/opentracing-go/gocontext.go create mode 100644 vendor/github.com/opentracing/opentracing-go/log/field.go create mode 100644 vendor/github.com/opentracing/opentracing-go/log/util.go create mode 100644 vendor/github.com/opentracing/opentracing-go/noop.go create mode 100644 vendor/github.com/opentracing/opentracing-go/propagation.go create mode 100644 vendor/github.com/opentracing/opentracing-go/span.go create mode 100644 vendor/github.com/opentracing/opentracing-go/tracer.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/context.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/event.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/span.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/util.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go create mode 100644 vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go create mode 100644 vendor/github.com/pierrec/lz4/block.go create mode 100644 vendor/github.com/pierrec/lz4/lz4.go create mode 100644 vendor/github.com/pierrec/lz4/reader.go create mode 100644 
vendor/github.com/pierrec/lz4/writer.go create mode 100644 vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go create mode 100644 vendor/github.com/pkg/errors/errors.go create mode 100644 vendor/github.com/pkg/errors/stack.go create mode 100644 vendor/github.com/rcrowley/go-metrics/counter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/debug.go create mode 100644 vendor/github.com/rcrowley/go-metrics/ewma.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge.go create mode 100644 vendor/github.com/rcrowley/go-metrics/gauge_float64.go create mode 100644 vendor/github.com/rcrowley/go-metrics/graphite.go create mode 100644 vendor/github.com/rcrowley/go-metrics/healthcheck.go create mode 100644 vendor/github.com/rcrowley/go-metrics/histogram.go create mode 100644 vendor/github.com/rcrowley/go-metrics/json.go create mode 100644 vendor/github.com/rcrowley/go-metrics/log.go create mode 100644 vendor/github.com/rcrowley/go-metrics/meter.go create mode 100644 vendor/github.com/rcrowley/go-metrics/metrics.go create mode 100644 vendor/github.com/rcrowley/go-metrics/opentsdb.go create mode 100644 vendor/github.com/rcrowley/go-metrics/registry.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go create mode 100644 vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go create mode 100644 vendor/github.com/rcrowley/go-metrics/sample.go create mode 100644 vendor/github.com/rcrowley/go-metrics/syslog.go create mode 100644 vendor/github.com/rcrowley/go-metrics/timer.go create mode 100644 vendor/github.com/rcrowley/go-metrics/writer.go create mode 100644 vendor/github.com/robfig/cron/constantdelay.go create mode 100644 vendor/github.com/robfig/cron/cron.go create mode 100644 vendor/github.com/robfig/cron/doc.go create mode 100644 vendor/github.com/robfig/cron/parser.go create mode 100644 vendor/github.com/robfig/cron/spec.go create mode 100644 vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go create mode 100644 vendor/golang.org/x/crypto/poly1305/poly1305.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_amd64.s create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_arm.go create mode 100644 vendor/golang.org/x/crypto/poly1305/sum_ref.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go create mode 100644 vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go create mode 100644 vendor/golang.org/x/net/context/context.go create mode 100644 vendor/golang.org/x/net/context/context_test.go create mode 100644 vendor/golang.org/x/net/context/go17.go create mode 100644 vendor/golang.org/x/net/context/go19.go create mode 100644 vendor/golang.org/x/net/context/pre_go17.go create mode 100644 vendor/golang.org/x/net/context/pre_go19.go create mode 100644 vendor/golang.org/x/time/rate/rate.go diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..d7e2efa --- /dev/null +++ b/.gitignore @@ -0,0 +1,2 @@ +.env +vyvanse diff --git a/bot/bot.go b/bot/bot.go new file mode 100644 index 
0000000..d6e6369 --- /dev/null +++ b/bot/bot.go @@ -0,0 +1,198 @@ +package bot + +import ( + "context" + "errors" + "log" + "strings" + "sync" + "time" + + "github.com/bwmarrin/discordgo" + opentracing "github.com/opentracing/opentracing-go" + otlog "github.com/opentracing/opentracing-go/log" + "golang.org/x/time/rate" +) + +var ( + ErrRateLimitExceeded = errors.New("bot: per-command rate limit exceeded") +) + +type command struct { + aliases []string + verb string + helptext string +} + +func (c *command) Verb() string { + return c.verb +} + +func (c *command) Helptext() string { + return c.helptext +} + +// Handler is the type that bot command functions need to implement. Errors +// should be returned. +type Handler func(context.Context, *discordgo.Session, *discordgo.Message, []string) error + +// CommandHandler is a generic interface for types that implement a bot +// command. It is akin to http.Handler, but more comprehensive. +type CommandHandler interface { + Verb() string + Helptext() string + + Handler(context.Context, *discordgo.Session, *discordgo.Message, []string) error + Permissions(context.Context, *discordgo.Session, *discordgo.Message, []string) error +} + +type basicCommand struct { + *command + handler Handler + permissions Handler + limiter *rate.Limiter +} + +func (bc *basicCommand) Handler(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error { + return bc.handler(ctx, s, m, parv) +} + +func (bc *basicCommand) Permissions(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error { + if !bc.limiter.Allow() { + return ErrRateLimitExceeded + } + + return bc.permissions(ctx, s, m, parv) +} + +// The "default" command set, useful for simple bot projects. +var ( + DefaultCommandSet = NewCommandSet() +) + +// Command handling errors. +var ( + ErrAlreadyExists = errors.New("bot: command already exists") + ErrNoSuchCommand = errors.New("bot: no such command exists") + ErrNoPermissions = errors.New("bot: you do not have permissions for this command") + ErrParvCountMismatch = errors.New("bot: parameter count mismatch") +) + +// The default command prefix. Command `foo` becomes `.foo` in chat, etc. +const ( + DefaultPrefix = "." +) + +// NewCommand creates an anonymous command and adds it to the default CommandSet. +func NewCommand(verb, helptext string, handler, permissions Handler) error { + return DefaultCommandSet.Add(NewBasicCommand(verb, helptext, handler, permissions)) +} + +// NewBasicCommand creates a CommandHandler instance using the implementation +// functions supplied as arguments. +func NewBasicCommand(verb, helptext string, permissions, handler Handler) CommandHandler { + return &basicCommand{ + command: &command{ + verb: verb, + helptext: helptext, + }, + handler: handler, + permissions: permissions, + limiter: rate.NewLimiter(rate.Every(5*time.Second), 1), + } +} + +// CommandSet is a group of bot commands similar to an http.ServeMux. +type CommandSet struct { + sync.Mutex + cmds map[string]CommandHandler + + Prefix string +} + +// NewCommandSet creates a new command set with the `help` command pre-loaded. +func NewCommandSet() *CommandSet { + cs := &CommandSet{ + cmds: map[string]CommandHandler{}, + Prefix: DefaultPrefix, + } + + cs.AddCmd("help", "Shows help for the bot", NoPermissions, cs.help) + + return cs +} + +// NoPermissions is a simple middelware function that allows all command invocations +// to pass the permissions check. 
+func NoPermissions(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error { + return nil +} + +// AddCmd is syntactic sugar for cs.Add(NewBasicCommand(args...)) +func (cs *CommandSet) AddCmd(verb, helptext string, permissions, handler Handler) error { + return cs.Add(NewBasicCommand(verb, helptext, permissions, handler)) +} + +// Add adds a single command handler to the CommandSet. This can be done at runtime +// but it is suggested to only add commands on application boot. +func (cs *CommandSet) Add(h CommandHandler) error { + cs.Lock() + defer cs.Unlock() + + v := strings.ToLower(h.Verb()) + + if _, ok := cs.cmds[v]; ok { + return ErrAlreadyExists + } + + cs.cmds[v] = h + + return nil +} + +// Run makes a CommandSet compatible with discordgo event dispatching. +func (cs *CommandSet) Run(s *discordgo.Session, msg *discordgo.Message) error { + cs.Lock() + defer cs.Unlock() + + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + sp, ctx := opentracing.StartSpanFromContext(ctx, "CommandSet.Run") + defer sp.Finish() + + if strings.HasPrefix(msg.Content, cs.Prefix) { + params := strings.Fields(msg.Content) + verb := strings.ToLower(params[0][1:]) + + sp.LogFields( + otlog.String("message-id", msg.ID), + otlog.String("author", msg.Author.ID), + otlog.String("channel-id", msg.ChannelID), + otlog.String("verb", verb), + otlog.Int("parc", len(params)), + ) + + cmd, ok := cs.cmds[verb] + if !ok { + return ErrNoSuchCommand + } + + err := cmd.Permissions(ctx, s, msg, params) + if err != nil { + sp.LogFields(otlog.Error(err)) + log.Printf("Permissions error: %s: %v", msg.Author.Username, err) + s.ChannelMessageSend(msg.ChannelID, "You don't have permissions for that, sorry.") + return ErrNoPermissions + } + + err = cmd.Handler(ctx, s, msg, params) + if err != nil { + log.Printf("command handler error: %v", err) + s.ChannelMessageSend(msg.ChannelID, "error when running that command: "+err.Error()) + return err + } + } + + return nil +} diff --git a/bot/doc.go b/bot/doc.go new file mode 100644 index 0000000..d5c5af5 --- /dev/null +++ b/bot/doc.go @@ -0,0 +1,10 @@ +/* +Package bot contains some generically useful bot rigging for Discord chatbots. + +This package works by defining command handlers in a CommandSet, and then dispatching +based on message contents. If the bot's command prefix is `;`, then `;foo` activates +the command handler for command `foo`. + +A CommandSet has a mutex baked into it for convenience of command implementation. +*/ +package bot diff --git a/bot/help.go b/bot/help.go new file mode 100644 index 0000000..c86d025 --- /dev/null +++ b/bot/help.go @@ -0,0 +1,29 @@ +package bot + +import ( + "context" + "fmt" + + "github.com/bwmarrin/discordgo" +) + +func (cs *CommandSet) help(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error { + switch len(parv) { + case 1: + // print all help on all commands + result := "Bot commands: \n" + + for verb, cmd := range cs.cmds { + result += fmt.Sprintf("%s%s: %s\n", cs.Prefix, verb, cmd.Helptext()) + } + + result += "If there's any problems please don't hesitate to ask a server admin for help." 
+ + s.ChannelMessageSend(m.ChannelID, result) + + default: + return ErrParvCountMismatch + } + + return nil +} diff --git a/box.rb b/box.rb new file mode 100644 index 0000000..a2deeb4 --- /dev/null +++ b/box.rb @@ -0,0 +1,32 @@ +$repo = "git.xeserv.us/xena/vyvanse" +$gover = "1.8.3" + +from "xena/go-mini:#{$gover}" +run "go#{$gover} download" + +def foldercopy(dir) + copy "#{dir}", "/root/go/src/#{$repo}/#{dir}" +end + +def gobuild(pkg) + run "go#{$gover} build #{$repo}/#{pkg} && go#{$gover} install #{$repo}/#{pkg}" +end + +folders = [ + "bot", + "cmd", + "vendor", + "vendor-log" +] + +folders.each { |x| foldercopy x } + +gobuild "cmd/vyvanse" + +cmd "/root/go/bin/vyvanse" + +run "rm -rf $HOME/sdk /root/go/pkg" +run "apk del go#{$gover}" +flatten + +tag "xena/vyvanse" diff --git a/config.nims b/config.nims new file mode 100644 index 0000000..e2b3169 --- /dev/null +++ b/config.nims @@ -0,0 +1,7 @@ +task up, "starts docker-compose services": + exec "box box.rb" + exec "docker-compose up -d" + +task down, "stops docker-compose services": + exec "docker-compose down" + diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..c14a478 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,15 @@ +version: "3" + +services: + zipkin: + image: openzipkin/zipkin + environment: + STORAGE_TYPE: mem + ports: + - "9411:9411" + + vyvanse: + image: xena/vyvanse + env_file: ./.env + depends_on: + - zipkin diff --git a/vendor-log b/vendor-log new file mode 100644 index 0000000..71af920 --- /dev/null +++ b/vendor-log @@ -0,0 +1,41 @@ +55b3f6a77f08038b1e82c3db3c1ce619e46de9b6 github.com/Shopify/sarama +905d0b5876706403fc746bc18dee25d2fecc2518 github.com/Xe/ln +ec64f23d236d7874e3b28ae86c833f57c7aa3389 github.com/apache/thrift/lib/go/thrift +d420e28024ad527390b43aa7f64e029083e11989 github.com/bwmarrin/discordgo +adab96458c51a58dc1783b3335dcce5461522e75 github.com/davecgh/go-spew/spew +b1fe83b5b03f624450823b751b662259ffc6af70 github.com/eapache/go-resiliency/breaker +bb955e01b9346ac19dc29eb16586c90ded99a98c github.com/eapache/go-xerial-snappy +44cc805cf13205b55f69e14bcb69867d1ae92f98 github.com/eapache/queue +390ab7935ee28ec6b286364bba9b4dd6410cb3d5 github.com/go-logfmt/logfmt +fcdc5011193ff531a548e9b0301828d5a5b97fd8 github.com/gogo/protobuf/proto +553a641470496b2327abcac10b36396bd98e45c9 github.com/golang/snappy +806455e841dcb0614891ad847500aaa6b10d3d1d github.com/google/gops/agent +806455e841dcb0614891ad847500aaa6b10d3d1d github.com/google/gops/internal +806455e841dcb0614891ad847500aaa6b10d3d1d github.com/google/gops/signal +a69d9f6de432e2c6b296a947d8a5ee88f68522cf github.com/gorilla/websocket +9d9ddadf44b4c17c42bafdc530ddeee1927c067d github.com/joho/godotenv +9d9ddadf44b4c17c42bafdc530ddeee1927c067d github.com/joho/godotenv/autoload +67f268f20922975c067ed799e4be6bacf152208c github.com/namsral/flag +9497d909de390b9a8dd0d88975cf0ce7d0b4d688 github.com/nishanths/go-xkcd +a52f2342449246d5bcc273e65cbdcfa5f7d6c63c github.com/opentracing-contrib/go-observer +8ebe5d4e236eed9fd88e593c288bfb804d630b8c github.com/opentracing/opentracing-go +8ebe5d4e236eed9fd88e593c288bfb804d630b8c github.com/opentracing/opentracing-go/ext +8ebe5d4e236eed9fd88e593c288bfb804d630b8c github.com/opentracing/opentracing-go/log +37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing +37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/flag +37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe 
+37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore +37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/types +37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/wire +5a3d2245f97fc249850e7802e3c01fad02a1c316 github.com/pierrec/lz4 +a0006b13c722f7f12368c00a3d3c2ae8a999a0c6 github.com/pierrec/xxHash/xxHash32 +c605e284fe17294bda444b34710735b29d1a9d90 github.com/pkg/errors +1f30fe9094a513ce4c700b9a54458bbb0c96996c github.com/rcrowley/go-metrics +736158dc09e10f1911ca3a1e1b01f11b566ce5db github.com/robfig/cron +42ff06aea7c329876e5a0fe94acc96902accf0ad golang.org/x/crypto/nacl/secretbox +42ff06aea7c329876e5a0fe94acc96902accf0ad golang.org/x/crypto/poly1305 +42ff06aea7c329876e5a0fe94acc96902accf0ad golang.org/x/crypto/salsa20/salsa +f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f golang.org/x/net/context +f51c12702a4d776e4c1fa9b0fabab841babae631 golang.org/x/time/rate +ae77be60afb1dcacde03767a8c37337fad28ac14 github.com/kardianos/osext +6a18b51d929caddbe795e1609195dee1d1cc729e github.com/justinian/dice diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go new file mode 100644 index 0000000..ab65f01 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_request.go @@ -0,0 +1,24 @@ +package sarama + +type ApiVersionsRequest struct { +} + +func (r *ApiVersionsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ApiVersionsRequest) key() int16 { + return 18 +} + +func (r *ApiVersionsRequest) version() int16 { + return 0 +} + +func (r *ApiVersionsRequest) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go new file mode 100644 index 0000000..23bc326 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/api_versions_response.go @@ -0,0 +1,87 @@ +package sarama + +type ApiVersionsResponseBlock struct { + ApiKey int16 + MinVersion int16 + MaxVersion int16 +} + +func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error { + pe.putInt16(b.ApiKey) + pe.putInt16(b.MinVersion) + pe.putInt16(b.MaxVersion) + return nil +} + +func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error { + var err error + + if b.ApiKey, err = pd.getInt16(); err != nil { + return err + } + + if b.MinVersion, err = pd.getInt16(); err != nil { + return err + } + + if b.MaxVersion, err = pd.getInt16(); err != nil { + return err + } + + return nil +} + +type ApiVersionsResponse struct { + Err KError + ApiVersions []*ApiVersionsResponseBlock +} + +func (r *ApiVersionsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if err := pe.putArrayLength(len(r.ApiVersions)); err != nil { + return err + } + for _, apiVersion := range r.ApiVersions { + if err := apiVersion.encode(pe); err != nil { + return err + } + } + return nil +} + +func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks) + for i := 0; i < numBlocks; i++ { + block := new(ApiVersionsResponseBlock) + if err := block.decode(pd); err != nil { + 
return err + } + r.ApiVersions[i] = block + } + + return nil +} + +func (r *ApiVersionsResponse) key() int16 { + return 18 +} + +func (r *ApiVersionsResponse) version() int16 { + return 0 +} + +func (r *ApiVersionsResponse) requiredVersion() KafkaVersion { + return V0_10_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go new file mode 100644 index 0000000..6d71a6d --- /dev/null +++ b/vendor/github.com/Shopify/sarama/async_producer.go @@ -0,0 +1,904 @@ +package sarama + +import ( + "fmt" + "sync" + "time" + + "github.com/eapache/go-resiliency/breaker" + "github.com/eapache/queue" +) + +// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages +// to the correct broker for the provided topic-partition, refreshing metadata as appropriate, +// and parses responses for errors. You must read from the Errors() channel or the +// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid +// leaks: it will not be garbage-collected automatically when it passes out of +// scope. +type AsyncProducer interface { + + // AsyncClose triggers a shutdown of the producer. The shutdown has completed + // when both the Errors and Successes channels have been closed. When calling + // AsyncClose, you *must* continue to read from those channels in order to + // drain the results of any messages in flight. + AsyncClose() + + // Close shuts down the producer and waits for any buffered messages to be + // flushed. You must call this function before a producer object passes out of + // scope, as it may otherwise leak memory. You must call this before calling + // Close on the underlying client. + Close() error + + // Input is the input channel for the user to write messages to that they + // wish to send. + Input() chan<- *ProducerMessage + + // Successes is the success output channel back to the user when Return.Successes is + // enabled. If Return.Successes is true, you MUST read from this channel or the + // Producer will deadlock. It is suggested that you send and read messages + // together in a single select statement. + Successes() <-chan *ProducerMessage + + // Errors is the error output channel back to the user. You MUST read from this + // channel or the Producer will deadlock when the channel is full. Alternatively, + // you can set Producer.Return.Errors in your config to false, which prevents + // errors to be returned. + Errors() <-chan *ProducerError +} + +type asyncProducer struct { + client Client + conf *Config + ownClient bool + + errors chan *ProducerError + input, successes, retries chan *ProducerMessage + inFlight sync.WaitGroup + + brokers map[*Broker]chan<- *ProducerMessage + brokerRefs map[chan<- *ProducerMessage]int + brokerLock sync.Mutex +} + +// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration. +func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) { + client, err := NewClient(addrs, conf) + if err != nil { + return nil, err + } + + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + p.(*asyncProducer).ownClient = true + return p, nil +} + +// NewAsyncProducerFromClient creates a new Producer using the given client. It is still +// necessary to call Close() on the underlying client when shutting down this producer. 
+func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + p := &asyncProducer{ + client: client, + conf: client.Config(), + errors: make(chan *ProducerError), + input: make(chan *ProducerMessage), + successes: make(chan *ProducerMessage), + retries: make(chan *ProducerMessage), + brokers: make(map[*Broker]chan<- *ProducerMessage), + brokerRefs: make(map[chan<- *ProducerMessage]int), + } + + // launch our singleton dispatchers + go withRecover(p.dispatcher) + go withRecover(p.retryHandler) + + return p, nil +} + +type flagSet int8 + +const ( + syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer + fin // final message from partitionProducer to brokerProducer and back + shutdown // start the shutdown process +) + +// ProducerMessage is the collection of elements passed to the Producer in order to send a message. +type ProducerMessage struct { + Topic string // The Kafka topic for this message. + // The partitioning key for this message. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Key Encoder + // The actual message to store in Kafka. Pre-existing Encoders include + // StringEncoder and ByteEncoder. + Value Encoder + + // This field is used to hold arbitrary data you wish to include so it + // will be available when receiving on the Successes and Errors channels. + // Sarama completely ignores this field and is only to be used for + // pass-through data. + Metadata interface{} + + // Below this point are filled in by the producer as the message is processed + + // Offset is the offset of the message stored on the broker. This is only + // guaranteed to be defined if the message was successfully delivered and + // RequiredAcks is not NoResponse. + Offset int64 + // Partition is the partition that the message was sent to. This is only + // guaranteed to be defined if the message was successfully delivered. + Partition int32 + // Timestamp is the timestamp assigned to the message by the broker. This + // is only guaranteed to be defined if the message was successfully + // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at + // least version 0.10.0. + Timestamp time.Time + + retries int + flags flagSet +} + +const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc. + +func (m *ProducerMessage) byteSize() int { + size := producerMessageOverhead + if m.Key != nil { + size += m.Key.Length() + } + if m.Value != nil { + size += m.Value.Length() + } + return size +} + +func (m *ProducerMessage) clear() { + m.flags = 0 + m.retries = 0 +} + +// ProducerError is the type of error generated when the producer fails to deliver a message. +// It contains the original ProducerMessage as well as the actual error value. +type ProducerError struct { + Msg *ProducerMessage + Err error +} + +func (pe ProducerError) Error() string { + return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err) +} + +// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface. +// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel +// when closing a producer. 
+type ProducerErrors []*ProducerError + +func (pe ProducerErrors) Error() string { + return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe)) +} + +func (p *asyncProducer) Errors() <-chan *ProducerError { + return p.errors +} + +func (p *asyncProducer) Successes() <-chan *ProducerMessage { + return p.successes +} + +func (p *asyncProducer) Input() chan<- *ProducerMessage { + return p.input +} + +func (p *asyncProducer) Close() error { + p.AsyncClose() + + if p.conf.Producer.Return.Successes { + go withRecover(func() { + for range p.successes { + } + }) + } + + var errors ProducerErrors + if p.conf.Producer.Return.Errors { + for event := range p.errors { + errors = append(errors, event) + } + } else { + <-p.errors + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (p *asyncProducer) AsyncClose() { + go withRecover(p.shutdown) +} + +// singleton +// dispatches messages by topic +func (p *asyncProducer) dispatcher() { + handlers := make(map[string]chan<- *ProducerMessage) + shuttingDown := false + + for msg := range p.input { + if msg == nil { + Logger.Println("Something tried to send a nil message, it was ignored.") + continue + } + + if msg.flags&shutdown != 0 { + shuttingDown = true + p.inFlight.Done() + continue + } else if msg.retries == 0 { + if shuttingDown { + // we can't just call returnError here because that decrements the wait group, + // which hasn't been incremented yet for this message, and shouldn't be + pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown} + if p.conf.Producer.Return.Errors { + p.errors <- pErr + } else { + Logger.Println(pErr) + } + continue + } + p.inFlight.Add(1) + } + + if msg.byteSize() > p.conf.Producer.MaxMessageBytes { + p.returnError(msg, ErrMessageSizeTooLarge) + continue + } + + handler := handlers[msg.Topic] + if handler == nil { + handler = p.newTopicProducer(msg.Topic) + handlers[msg.Topic] = handler + } + + handler <- msg + } + + for _, handler := range handlers { + close(handler) + } +} + +// one per topic +// partitions messages, then dispatches them by partition +type topicProducer struct { + parent *asyncProducer + topic string + input <-chan *ProducerMessage + + breaker *breaker.Breaker + handlers map[int32]chan<- *ProducerMessage + partitioner Partitioner +} + +func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + tp := &topicProducer{ + parent: p, + topic: topic, + input: input, + breaker: breaker.New(3, 1, 10*time.Second), + handlers: make(map[int32]chan<- *ProducerMessage), + partitioner: p.conf.Producer.Partitioner(topic), + } + go withRecover(tp.dispatch) + return input +} + +func (tp *topicProducer) dispatch() { + for msg := range tp.input { + if msg.retries == 0 { + if err := tp.partitionMessage(msg); err != nil { + tp.parent.returnError(msg, err) + continue + } + } + + handler := tp.handlers[msg.Partition] + if handler == nil { + handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition) + tp.handlers[msg.Partition] = handler + } + + handler <- msg + } + + for _, handler := range tp.handlers { + close(handler) + } +} + +func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error { + var partitions []int32 + + err := tp.breaker.Run(func() (err error) { + if tp.partitioner.RequiresConsistency() { + partitions, err = tp.parent.client.Partitions(msg.Topic) + } else { + partitions, err = tp.parent.client.WritablePartitions(msg.Topic) + } + return + }) + + if err != nil { + return err + } + + 
numPartitions := int32(len(partitions)) + + if numPartitions == 0 { + return ErrLeaderNotAvailable + } + + choice, err := tp.partitioner.Partition(msg, numPartitions) + + if err != nil { + return err + } else if choice < 0 || choice >= numPartitions { + return ErrInvalidPartition + } + + msg.Partition = partitions[choice] + + return nil +} + +// one per partition per topic +// dispatches messages to the appropriate broker +// also responsible for maintaining message order during retries +type partitionProducer struct { + parent *asyncProducer + topic string + partition int32 + input <-chan *ProducerMessage + + leader *Broker + breaker *breaker.Breaker + output chan<- *ProducerMessage + + // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through, + // all other messages get buffered in retryState[msg.retries].buf to preserve ordering + // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and + // therefore whether our buffer is complete and safe to flush) + highWatermark int + retryState []partitionRetryState +} + +type partitionRetryState struct { + buf []*ProducerMessage + expectChaser bool +} + +func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage { + input := make(chan *ProducerMessage, p.conf.ChannelBufferSize) + pp := &partitionProducer{ + parent: p, + topic: topic, + partition: partition, + input: input, + + breaker: breaker.New(3, 1, 10*time.Second), + retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1), + } + go withRecover(pp.dispatch) + return input +} + +func (pp *partitionProducer) dispatch() { + // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader` + // on the first message + pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition) + if pp.leader != nil { + pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + } + + for msg := range pp.input { + if msg.retries > pp.highWatermark { + // a new, higher, retry level; handle it and then back off + pp.newHighWatermark(msg.retries) + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + } else if pp.highWatermark > 0 { + // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level + if msg.retries < pp.highWatermark { + // in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin) + if msg.flags&fin == fin { + pp.retryState[msg.retries].expectChaser = false + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + } else { + pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg) + } + continue + } else if msg.flags&fin == fin { + // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set, + // meaning this retry level is done and we can go down (at least) one level and flush that + pp.retryState[pp.highWatermark].expectChaser = false + pp.flushRetryBuffers() + pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected + continue + } + } + + // if we made it this far then the current msg contains real data, and can be sent to the next goroutine + // without breaking any of our ordering guarantees + + if pp.output == 
nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnError(msg, err) + time.Sleep(pp.parent.conf.Producer.Retry.Backoff) + continue + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + pp.output <- msg + } + + if pp.output != nil { + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + } +} + +func (pp *partitionProducer) newHighWatermark(hwm int) { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm) + pp.highWatermark = hwm + + // send off a fin so that we know when everything "in between" has made it + // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages) + pp.retryState[pp.highWatermark].expectChaser = true + pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1} + + // a new HWM means that our current broker selection is out of date + Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + pp.parent.unrefBrokerProducer(pp.leader, pp.output) + pp.output = nil +} + +func (pp *partitionProducer) flushRetryBuffers() { + Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark) + for { + pp.highWatermark-- + + if pp.output == nil { + if err := pp.updateLeader(); err != nil { + pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err) + goto flushDone + } + Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID()) + } + + for _, msg := range pp.retryState[pp.highWatermark].buf { + pp.output <- msg + } + + flushDone: + pp.retryState[pp.highWatermark].buf = nil + if pp.retryState[pp.highWatermark].expectChaser { + Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark) + break + } else if pp.highWatermark == 0 { + Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition) + break + } + } +} + +func (pp *partitionProducer) updateLeader() error { + return pp.breaker.Run(func() (err error) { + if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil { + return err + } + + if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil { + return err + } + + pp.output = pp.parent.getBrokerProducer(pp.leader) + pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight + pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn} + + return nil + }) +} + +// one per broker; also constructs an associated flusher +func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage { + var ( + input = make(chan *ProducerMessage) + bridge = make(chan *produceSet) + responses = make(chan *brokerProducerResponse) + ) + + bp := &brokerProducer{ + parent: p, + broker: broker, + input: input, + output: bridge, + responses: responses, + buffer: newProduceSet(p), + currentRetries: make(map[string]map[int32]error), + } + go withRecover(bp.run) + + // minimal bridge to make the network response `select`able + go withRecover(func() { + for set := range bridge { + request := set.buildRequest() + + response, err := broker.Produce(request) + + responses <- &brokerProducerResponse{ + set: set, + err: err, + res: response, + } 
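+			// Note that broker.Produce blocks on the network here, in this
+			// dedicated goroutine, so the brokerProducer run loop stays free
+			// to keep batching and selecting over its other channels.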
+ } + close(responses) + }) + + return input +} + +type brokerProducerResponse struct { + set *produceSet + err error + res *ProduceResponse +} + +// groups messages together into appropriately-sized batches for sending to the broker +// handles state related to retries etc +type brokerProducer struct { + parent *asyncProducer + broker *Broker + + input <-chan *ProducerMessage + output chan<- *produceSet + responses <-chan *brokerProducerResponse + + buffer *produceSet + timer <-chan time.Time + timerFired bool + + closing error + currentRetries map[string]map[int32]error +} + +func (bp *brokerProducer) run() { + var output chan<- *produceSet + Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID()) + + for { + select { + case msg := <-bp.input: + if msg == nil { + bp.shutdown() + return + } + + if msg.flags&syn == syn { + Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + if bp.currentRetries[msg.Topic] == nil { + bp.currentRetries[msg.Topic] = make(map[int32]error) + } + bp.currentRetries[msg.Topic][msg.Partition] = nil + bp.parent.inFlight.Done() + continue + } + + if reason := bp.needsRetry(msg); reason != nil { + bp.parent.retryMessage(msg, reason) + + if bp.closing == nil && msg.flags&fin == fin { + // we were retrying this partition but we can start processing again + delete(bp.currentRetries[msg.Topic], msg.Partition) + Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n", + bp.broker.ID(), msg.Topic, msg.Partition) + } + + continue + } + + if bp.buffer.wouldOverflow(msg) { + if err := bp.waitForSpace(msg); err != nil { + bp.parent.retryMessage(msg, err) + continue + } + } + + if err := bp.buffer.add(msg); err != nil { + bp.parent.returnError(msg, err) + continue + } + + if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil { + bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency) + } + case <-bp.timer: + bp.timerFired = true + case output <- bp.buffer: + bp.rollOver() + case response := <-bp.responses: + bp.handleResponse(response) + } + + if bp.timerFired || bp.buffer.readyToFlush() { + output = bp.output + } else { + output = nil + } + } +} + +func (bp *brokerProducer) shutdown() { + for !bp.buffer.empty() { + select { + case response := <-bp.responses: + bp.handleResponse(response) + case bp.output <- bp.buffer: + bp.rollOver() + } + } + close(bp.output) + for response := range bp.responses { + bp.handleResponse(response) + } + + Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID()) +} + +func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error { + if bp.closing != nil { + return bp.closing + } + + return bp.currentRetries[msg.Topic][msg.Partition] +} + +func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error { + Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID()) + + for { + select { + case response := <-bp.responses: + bp.handleResponse(response) + // handling a response can change our state, so re-check some things + if reason := bp.needsRetry(msg); reason != nil { + return reason + } else if !bp.buffer.wouldOverflow(msg) { + return nil + } + case bp.output <- bp.buffer: + bp.rollOver() + return nil + } + } +} + +func (bp *brokerProducer) rollOver() { + bp.timer = nil + bp.timerFired = false + bp.buffer = newProduceSet(bp.parent) +} + +func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) { + if response.err != nil { + bp.handleError(response.set, response.err) + } 
else { + bp.handleSuccess(response.set, response.res) + } + + if bp.buffer.empty() { + bp.rollOver() // this can happen if the response invalidated our buffer + } +} + +func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) { + // we iterate through the blocks in the request set, not the response, so that we notice + // if the response is missing a block completely + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + if response == nil { + // this only happens when RequiredAcks is NoResponse, so we have to assume success + bp.parent.returnSuccesses(msgs) + return + } + + block := response.GetBlock(topic, partition) + if block == nil { + bp.parent.returnErrors(msgs, ErrIncompleteResponse) + return + } + + switch block.Err { + // Success + case ErrNoError: + if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() { + for _, msg := range msgs { + msg.Timestamp = block.Timestamp + } + } + for i, msg := range msgs { + msg.Offset = block.Offset + int64(i) + } + bp.parent.returnSuccesses(msgs) + // Retriable errors + case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition, + ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend: + Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n", + bp.broker.ID(), topic, partition, block.Err) + bp.currentRetries[topic][partition] = block.Err + bp.parent.retryMessages(msgs, block.Err) + bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err) + // Other non-retriable errors + default: + bp.parent.returnErrors(msgs, block.Err) + } + }) +} + +func (bp *brokerProducer) handleError(sent *produceSet, err error) { + switch err.(type) { + case PacketEncodingError: + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.returnErrors(msgs, err) + }) + default: + Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err) + bp.parent.abandonBrokerConnection(bp.broker) + _ = bp.broker.Close() + bp.closing = err + sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.retryMessages(msgs, err) + }) + bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) { + bp.parent.retryMessages(msgs, err) + }) + bp.rollOver() + } +} + +// singleton +// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock +// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel +func (p *asyncProducer) retryHandler() { + var msg *ProducerMessage + buf := queue.New() + + for { + if buf.Length() == 0 { + msg = <-p.retries + } else { + select { + case msg = <-p.retries: + case p.input <- buf.Peek().(*ProducerMessage): + buf.Remove() + continue + } + } + + if msg == nil { + return + } + + buf.Add(msg) + } +} + +// utility functions + +func (p *asyncProducer) shutdown() { + Logger.Println("Producer shutting down.") + p.inFlight.Add(1) + p.input <- &ProducerMessage{flags: shutdown} + + p.inFlight.Wait() + + if p.ownClient { + err := p.client.Close() + if err != nil { + Logger.Println("producer/shutdown failed to close the embedded client:", err) + } + } + + close(p.input) + close(p.retries) + close(p.errors) + close(p.successes) +} + +func (p *asyncProducer) returnError(msg *ProducerMessage, err error) { + msg.clear() + pErr := &ProducerError{Msg: msg, Err: err} + if p.conf.Producer.Return.Errors { + p.errors <- pErr 
+ } else { + Logger.Println(pErr) + } + p.inFlight.Done() +} + +func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) { + for _, msg := range batch { + p.returnError(msg, err) + } +} + +func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) { + for _, msg := range batch { + if p.conf.Producer.Return.Successes { + msg.clear() + p.successes <- msg + } + p.inFlight.Done() + } +} + +func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) { + if msg.retries >= p.conf.Producer.Retry.Max { + p.returnError(msg, err) + } else { + msg.retries++ + p.retries <- msg + } +} + +func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) { + for _, msg := range batch { + p.retryMessage(msg, err) + } +} + +func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + bp := p.brokers[broker] + + if bp == nil { + bp = p.newBrokerProducer(broker) + p.brokers[broker] = bp + p.brokerRefs[bp] = 0 + } + + p.brokerRefs[bp]++ + + return bp +} + +func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + p.brokerRefs[bp]-- + if p.brokerRefs[bp] == 0 { + close(bp) + delete(p.brokerRefs, bp) + + if p.brokers[broker] == bp { + delete(p.brokers, broker) + } + } +} + +func (p *asyncProducer) abandonBrokerConnection(broker *Broker) { + p.brokerLock.Lock() + defer p.brokerLock.Unlock() + + delete(p.brokers, broker) +} diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go new file mode 100644 index 0000000..f57a690 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/broker.go @@ -0,0 +1,685 @@ +package sarama + +import ( + "crypto/tls" + "encoding/binary" + "fmt" + "io" + "net" + "strconv" + "sync" + "sync/atomic" + "time" + + "github.com/rcrowley/go-metrics" +) + +// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe. +type Broker struct { + id int32 + addr string + + conf *Config + correlationID int32 + conn net.Conn + connErr error + lock sync.Mutex + opened int32 + + responses chan responsePromise + done chan bool + + incomingByteRate metrics.Meter + requestRate metrics.Meter + requestSize metrics.Histogram + requestLatency metrics.Histogram + outgoingByteRate metrics.Meter + responseRate metrics.Meter + responseSize metrics.Histogram + brokerIncomingByteRate metrics.Meter + brokerRequestRate metrics.Meter + brokerRequestSize metrics.Histogram + brokerRequestLatency metrics.Histogram + brokerOutgoingByteRate metrics.Meter + brokerResponseRate metrics.Meter + brokerResponseSize metrics.Histogram +} + +type responsePromise struct { + requestTime time.Time + correlationID int32 + packets chan []byte + errors chan error +} + +// NewBroker creates and returns a Broker targeting the given host:port address. +// This does not attempt to actually connect, you have to call Open() for that. +func NewBroker(addr string) *Broker { + return &Broker{id: -1, addr: addr} +} + +// Open tries to connect to the Broker if it is not already connected or connecting, but does not block +// waiting for the connection to complete. This means that any subsequent operations on the broker will +// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call, +// follow it by a call to Connected(). 
The only errors Open will return directly are ConfigurationError or +// AlreadyConnected. If conf is nil, the result of NewConfig() is used. +func (b *Broker) Open(conf *Config) error { + if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) { + return ErrAlreadyConnected + } + + if conf == nil { + conf = NewConfig() + } + + err := conf.Validate() + if err != nil { + return err + } + + b.lock.Lock() + + go withRecover(func() { + defer b.lock.Unlock() + + dialer := net.Dialer{ + Timeout: conf.Net.DialTimeout, + KeepAlive: conf.Net.KeepAlive, + } + + if conf.Net.TLS.Enable { + b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config) + } else { + b.conn, b.connErr = dialer.Dial("tcp", b.addr) + } + if b.connErr != nil { + Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr) + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + b.conn = newBufConn(b.conn) + + b.conf = conf + + // Create or reuse the global metrics shared between brokers + b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry) + b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry) + b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry) + b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry) + b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry) + b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry) + b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry) + // Do not gather metrics for seeded broker (only used during bootstrap) because they share + // the same id (-1) and are already exposed through the global metrics above + if b.id >= 0 { + b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry) + b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry) + b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry) + b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry) + b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry) + b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry) + b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry) + } + + if conf.Net.SASL.Enable { + b.connErr = b.sendAndReceiveSASLPlainAuth() + if b.connErr != nil { + err = b.conn.Close() + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + b.conn = nil + atomic.StoreInt32(&b.opened, 0) + return + } + } + + b.done = make(chan bool) + b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1) + + if b.id >= 0 { + Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id) + } else { + Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr) + } + go withRecover(b.responseReceiver) + }) + + return nil +} + +// Connected returns true if the broker is connected and false otherwise. If the broker is not +// connected but it had tried to connect, the error from that connection attempt is also returned. 
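+//
+// A sketch of the fully synchronous open pattern described on Open ("addr"
+// is a hypothetical host:port string; passing nil uses NewConfig):
+//
+//	broker := NewBroker(addr)
+//	if err := broker.Open(nil); err != nil {
+//		return err
+//	}
+//	if _, err := broker.Connected(); err != nil {
+//		return err
+//	}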
+func (b *Broker) Connected() (bool, error) { + b.lock.Lock() + defer b.lock.Unlock() + + return b.conn != nil, b.connErr +} + +func (b *Broker) Close() error { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + return ErrNotConnected + } + + close(b.responses) + <-b.done + + err := b.conn.Close() + + b.conn = nil + b.connErr = nil + b.done = nil + b.responses = nil + + if err == nil { + Logger.Printf("Closed connection to broker %s\n", b.addr) + } else { + Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err) + } + + atomic.StoreInt32(&b.opened, 0) + + return err +} + +// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known. +func (b *Broker) ID() int32 { + return b.id +} + +// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker. +func (b *Broker) Addr() string { + return b.addr +} + +func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) { + response := new(MetadataResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) { + response := new(ConsumerMetadataResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) { + response := new(OffsetResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) { + var response *ProduceResponse + var err error + + if request.RequiredAcks == NoResponse { + err = b.sendAndReceive(request, nil) + } else { + response = new(ProduceResponse) + err = b.sendAndReceive(request, response) + } + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) { + response := new(FetchResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) { + response := new(OffsetCommitResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) { + response := new(OffsetFetchResponse) + + err := b.sendAndReceive(request, response) + + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) { + response := new(JoinGroupResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) { + response := new(SyncGroupResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) { + response := new(LeaveGroupResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) Heartbeat(request *HeartbeatRequest) 
(*HeartbeatResponse, error) { + response := new(HeartbeatResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) { + response := new(ListGroupsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) { + response := new(DescribeGroupsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) { + response := new(ApiVersionsResponse) + + err := b.sendAndReceive(request, response) + if err != nil { + return nil, err + } + + return response, nil +} + +func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) { + b.lock.Lock() + defer b.lock.Unlock() + + if b.conn == nil { + if b.connErr != nil { + return nil, b.connErr + } + return nil, ErrNotConnected + } + + if !b.conf.Version.IsAtLeast(rb.requiredVersion()) { + return nil, ErrUnsupportedVersion + } + + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return nil, err + } + + err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + return nil, err + } + + requestTime := time.Now() + bytes, err := b.conn.Write(buf) + b.updateOutgoingCommunicationMetrics(bytes) + if err != nil { + return nil, err + } + b.correlationID++ + + if !promiseResponse { + // Record request latency without the response + b.updateRequestLatencyMetrics(time.Since(requestTime)) + return nil, nil + } + + promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)} + b.responses <- promise + + return &promise, nil +} + +func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error { + promise, err := b.send(req, res != nil) + + if err != nil { + return err + } + + if promise == nil { + return nil + } + + select { + case buf := <-promise.packets: + return versionedDecode(buf, res, req.version()) + case err = <-promise.errors: + return err + } +} + +func (b *Broker) decode(pd packetDecoder) (err error) { + b.id, err = pd.getInt32() + if err != nil { + return err + } + + host, err := pd.getString() + if err != nil { + return err + } + + port, err := pd.getInt32() + if err != nil { + return err + } + + b.addr = net.JoinHostPort(host, fmt.Sprint(port)) + if _, _, err := net.SplitHostPort(b.addr); err != nil { + return err + } + + return nil +} + +func (b *Broker) encode(pe packetEncoder) (err error) { + + host, portstr, err := net.SplitHostPort(b.addr) + if err != nil { + return err + } + port, err := strconv.Atoi(portstr) + if err != nil { + return err + } + + pe.putInt32(b.id) + + err = pe.putString(host) + if err != nil { + return err + } + + pe.putInt32(int32(port)) + + return nil +} + +func (b *Broker) responseReceiver() { + var dead error + header := make([]byte, 8) + for response := range b.responses { + if dead != nil { + response.errors <- dead + continue + } + + err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout)) + if err != nil { + dead = err + response.errors <- err + continue + } + + bytesReadHeader, err := io.ReadFull(b.conn, header) + requestLatency := 
time.Since(response.requestTime) + if err != nil { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + dead = err + response.errors <- err + continue + } + + decodedHeader := responseHeader{} + err = decode(header, &decodedHeader) + if err != nil { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + dead = err + response.errors <- err + continue + } + if decodedHeader.correlationID != response.correlationID { + b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency) + // TODO if decoded ID < cur ID, discard until we catch up + // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response + dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)} + response.errors <- dead + continue + } + + buf := make([]byte, decodedHeader.length-4) + bytesReadBody, err := io.ReadFull(b.conn, buf) + b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency) + if err != nil { + dead = err + response.errors <- err + continue + } + + response.packets <- buf + } + close(b.done) +} + +func (b *Broker) sendAndReceiveSASLPlainHandshake() error { + rb := &SaslHandshakeRequest{"PLAIN"} + req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb} + buf, err := encode(req, b.conf.MetricRegistry) + if err != nil { + return err + } + + err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + return err + } + + requestTime := time.Now() + bytes, err := b.conn.Write(buf) + b.updateOutgoingCommunicationMetrics(bytes) + if err != nil { + Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error()) + return err + } + b.correlationID++ + //wait for the response + header := make([]byte, 8) // response header + _, err = io.ReadFull(b.conn, header) + if err != nil { + Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error()) + return err + } + length := binary.BigEndian.Uint32(header[:4]) + payload := make([]byte, length-4) + n, err := io.ReadFull(b.conn, payload) + if err != nil { + Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error()) + return err + } + b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime)) + res := &SaslHandshakeResponse{} + err = versionedDecode(payload, res, 0) + if err != nil { + Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error()) + return err + } + if res.Err != ErrNoError { + Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error()) + return res.Err + } + Logger.Print("Successful SASL handshake") + return nil +} + +// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149) +// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9 +// +// In SASL Plain, Kafka expects the auth header to be in the following format +// Message format (from https://tools.ietf.org/html/rfc4616): +// +// message = [authzid] UTF8NUL authcid UTF8NUL passwd +// authcid = 1*SAFE ; MUST accept up to 255 octets +// authzid = 1*SAFE ; MUST accept up to 255 octets +// passwd = 1*SAFE ; MUST accept up to 255 octets +// UTF8NUL = %x00 ; UTF-8 encoded NUL character +// +// SAFE = UTF1 / UTF2 / UTF3 / UTF4 +// ;; any UTF-8 encoded Unicode character except NUL +// +// When credentials are valid, Kafka returns a 4 byte array of null characters. +// When credentials are invalid, Kafka closes the connection. 
This does not seem to be the ideal way +// of responding to bad credentials but thats how its being done today. +func (b *Broker) sendAndReceiveSASLPlainAuth() error { + if b.conf.Net.SASL.Handshake { + handshakeErr := b.sendAndReceiveSASLPlainHandshake() + if handshakeErr != nil { + Logger.Printf("Error while performing SASL handshake %s\n", b.addr) + return handshakeErr + } + } + length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password) + authBytes := make([]byte, length+4) //4 byte length header + auth data + binary.BigEndian.PutUint32(authBytes, uint32(length)) + copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password)) + + err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout)) + if err != nil { + Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error()) + return err + } + + requestTime := time.Now() + bytesWritten, err := b.conn.Write(authBytes) + b.updateOutgoingCommunicationMetrics(bytesWritten) + if err != nil { + Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error()) + return err + } + + header := make([]byte, 4) + n, err := io.ReadFull(b.conn, header) + b.updateIncomingCommunicationMetrics(n, time.Since(requestTime)) + // If the credentials are valid, we would get a 4 byte response filled with null characters. + // Otherwise, the broker closes the connection and we get an EOF + if err != nil { + Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error()) + return err + } + + Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header) + return nil +} + +func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) { + b.updateRequestLatencyMetrics(requestLatency) + b.responseRate.Mark(1) + if b.brokerResponseRate != nil { + b.brokerResponseRate.Mark(1) + } + responseSize := int64(bytes) + b.incomingByteRate.Mark(responseSize) + if b.brokerIncomingByteRate != nil { + b.brokerIncomingByteRate.Mark(responseSize) + } + b.responseSize.Update(responseSize) + if b.brokerResponseSize != nil { + b.brokerResponseSize.Update(responseSize) + } +} + +func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) { + requestLatencyInMs := int64(requestLatency / time.Millisecond) + b.requestLatency.Update(requestLatencyInMs) + if b.brokerRequestLatency != nil { + b.brokerRequestLatency.Update(requestLatencyInMs) + } +} + +func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) { + b.requestRate.Mark(1) + if b.brokerRequestRate != nil { + b.brokerRequestRate.Mark(1) + } + requestSize := int64(bytes) + b.outgoingByteRate.Mark(requestSize) + if b.brokerOutgoingByteRate != nil { + b.brokerOutgoingByteRate.Mark(requestSize) + } + b.requestSize.Update(requestSize) + if b.brokerRequestSize != nil { + b.brokerRequestSize.Update(requestSize) + } +} diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go new file mode 100644 index 0000000..c6510d9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/client.go @@ -0,0 +1,791 @@ +package sarama + +import ( + "math/rand" + "sort" + "sync" + "time" +) + +// Client is a generic Kafka client. It manages connections to one or more Kafka brokers. +// You MUST call Close() on a client to avoid leaks, it will not be garbage-collected +// automatically when it passes out of scope. 
It is safe to share a client amongst many +// users, however Kafka will process requests from a single client strictly in serial, +// so it is generally more efficient to use the default one client per producer/consumer. +type Client interface { + // Config returns the Config struct of the client. This struct should not be + // altered after it has been created. + Config() *Config + + // Brokers returns the current set of active brokers as retrieved from cluster metadata. + Brokers() []*Broker + + // Topics returns the set of available topics as retrieved from cluster metadata. + Topics() ([]string, error) + + // Partitions returns the sorted list of all partition IDs for the given topic. + Partitions(topic string) ([]int32, error) + + // WritablePartitions returns the sorted list of all writable partition IDs for + // the given topic, where "writable" means "having a valid leader accepting + // writes". + WritablePartitions(topic string) ([]int32, error) + + // Leader returns the broker object that is the leader of the current + // topic/partition, as determined by querying the cluster metadata. + Leader(topic string, partitionID int32) (*Broker, error) + + // Replicas returns the set of all replica IDs for the given partition. + Replicas(topic string, partitionID int32) ([]int32, error) + + // InSyncReplicas returns the set of all in-sync replica IDs for the given + // partition. In-sync replicas are replicas which are fully caught up with + // the partition leader. + InSyncReplicas(topic string, partitionID int32) ([]int32, error) + + // RefreshMetadata takes a list of topics and queries the cluster to refresh the + // available metadata for those topics. If no topics are provided, it will refresh + // metadata for all topics. + RefreshMetadata(topics ...string) error + + // GetOffset queries the cluster to get the most recent available offset at the + // given time on the topic/partition combination. Time should be OffsetOldest for + // the earliest available offset, OffsetNewest for the offset of the message that + // will be produced next, or a time. + GetOffset(topic string, partitionID int32, time int64) (int64, error) + + // Coordinator returns the coordinating broker for a consumer group. It will + // return a locally cached value if it's available. You can call + // RefreshCoordinator to update the cached value. This function only works on + // Kafka 0.8.2 and higher. + Coordinator(consumerGroup string) (*Broker, error) + + // RefreshCoordinator retrieves the coordinator for a consumer group and stores it + // in local cache. This function only works on Kafka 0.8.2 and higher. + RefreshCoordinator(consumerGroup string) error + + // Close shuts down all broker connections managed by this client. It is required + // to call this function before a client object passes out of scope, as it will + // otherwise leak memory. You must close any Producers or Consumers using a client + // before you close the client. + Close() error + + // Closed returns true if the client has already had Close called on it + Closed() bool +} + +const ( + // OffsetNewest stands for the log head offset, i.e. the offset that will be + // assigned to the next message that will be produced to the partition. You + // can send this to a client's GetOffset method to get this offset, or when + // calling ConsumePartition to start consuming new messages. + OffsetNewest int64 = -1 + // OffsetOldest stands for the oldest offset available on the broker for a + // partition. 
You can send this to a client's GetOffset method to get this + // offset, or when calling ConsumePartition to start consuming from the + // oldest offset that is still available on the broker. + OffsetOldest int64 = -2 +) + +type client struct { + conf *Config + closer, closed chan none // for shutting down background metadata updater + + // the broker addresses given to us through the constructor are not guaranteed to be returned in + // the cluster metadata (I *think* it only returns brokers who are currently leading partitions?) + // so we store them separately + seedBrokers []*Broker + deadSeeds []*Broker + + brokers map[int32]*Broker // maps broker ids to brokers + metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata + coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs + + // If the number of partitions is large, we can get some churn calling cachedPartitions, + // so the result is cached. It is important to update this value whenever metadata is changed + cachedPartitionsResults map[string][maxPartitionIndex][]int32 + + lock sync.RWMutex // protects access to the maps that hold cluster state. +} + +// NewClient creates a new Client. It connects to one of the given broker addresses +// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot +// be retrieved from any of the given broker addresses, the client is not created. +func NewClient(addrs []string, conf *Config) (Client, error) { + Logger.Println("Initializing new client") + + if conf == nil { + conf = NewConfig() + } + + if err := conf.Validate(); err != nil { + return nil, err + } + + if len(addrs) < 1 { + return nil, ConfigurationError("You must provide at least one broker address") + } + + client := &client{ + conf: conf, + closer: make(chan none), + closed: make(chan none), + brokers: make(map[int32]*Broker), + metadata: make(map[string]map[int32]*PartitionMetadata), + cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32), + coordinators: make(map[string]int32), + } + + random := rand.New(rand.NewSource(time.Now().UnixNano())) + for _, index := range random.Perm(len(addrs)) { + client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index])) + } + + if conf.Metadata.Full { + // do an initial fetch of all cluster metadata by specifying an empty list of topics + err := client.RefreshMetadata() + switch err { + case nil: + break + case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed: + // indicates that maybe part of the cluster is down, but is not fatal to creating the client + Logger.Println(err) + default: + close(client.closed) // we haven't started the background updater yet, so we have to do this manually + _ = client.Close() + return nil, err + } + } + go withRecover(client.backgroundMetadataUpdater) + + Logger.Println("Successfully initialized new client") + + return client, nil +} + +func (client *client) Config() *Config { + return client.conf +} + +func (client *client) Brokers() []*Broker { + client.lock.RLock() + defer client.lock.RUnlock() + brokers := make([]*Broker, 0) + for _, broker := range client.brokers { + brokers = append(brokers, broker) + } + return brokers +} + +func (client *client) Close() error { + if client.Closed() { + // Chances are this is being called from a defer() and the error will go unobserved + // so we go ahead and log the event in this case. 
+ Logger.Printf("Close() called on already closed client") + return ErrClosedClient + } + + // shutdown and wait for the background thread before we take the lock, to avoid races + close(client.closer) + <-client.closed + + client.lock.Lock() + defer client.lock.Unlock() + Logger.Println("Closing Client") + + for _, broker := range client.brokers { + safeAsyncClose(broker) + } + + for _, broker := range client.seedBrokers { + safeAsyncClose(broker) + } + + client.brokers = nil + client.metadata = nil + + return nil +} + +func (client *client) Closed() bool { + return client.brokers == nil +} + +func (client *client) Topics() ([]string, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + client.lock.RLock() + defer client.lock.RUnlock() + + ret := make([]string, 0, len(client.metadata)) + for topic := range client.metadata { + ret = append(ret, topic) + } + + return ret, nil +} + +func (client *client) Partitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, allPartitions) + + if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, allPartitions) + } + + if partitions == nil { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) WritablePartitions(topic string) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + partitions := client.cachedPartitions(topic, writablePartitions) + + // len==0 catches when it's nil (no such topic) and the odd case when every single + // partition is undergoing leader election simultaneously. Callers have to be able to handle + // this function returning an empty slice (which is a valid return value) but catching it + // here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers + // a metadata refresh as a nicety so callers can just try again and don't have to manually + // trigger a refresh (otherwise they'd just keep getting a stale cached copy). 
+ if len(partitions) == 0 { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + partitions = client.cachedPartitions(topic, writablePartitions) + } + + if partitions == nil { + return nil, ErrUnknownTopicOrPartition + } + + return partitions, nil +} + +func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return nil, metadata.Err + } + return dupInt32Slice(metadata.Replicas), nil +} + +func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + metadata := client.cachedMetadata(topic, partitionID) + + if metadata == nil { + err := client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + metadata = client.cachedMetadata(topic, partitionID) + } + + if metadata == nil { + return nil, ErrUnknownTopicOrPartition + } + + if metadata.Err == ErrReplicaNotAvailable { + return nil, metadata.Err + } + return dupInt32Slice(metadata.Isr), nil +} + +func (client *client) Leader(topic string, partitionID int32) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + leader, err := client.cachedLeader(topic, partitionID) + + if leader == nil { + err = client.RefreshMetadata(topic) + if err != nil { + return nil, err + } + leader, err = client.cachedLeader(topic, partitionID) + } + + return leader, err +} + +func (client *client) RefreshMetadata(topics ...string) error { + if client.Closed() { + return ErrClosedClient + } + + // Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper + // error. This handles the case by returning an error instead of sending it + // off to Kafka. 
See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310 + for _, topic := range topics { + if len(topic) == 0 { + return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return + } + } + + return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max) +} + +func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) { + if client.Closed() { + return -1, ErrClosedClient + } + + offset, err := client.getOffset(topic, partitionID, time) + + if err != nil { + if err := client.RefreshMetadata(topic); err != nil { + return -1, err + } + return client.getOffset(topic, partitionID, time) + } + + return offset, err +} + +func (client *client) Coordinator(consumerGroup string) (*Broker, error) { + if client.Closed() { + return nil, ErrClosedClient + } + + coordinator := client.cachedCoordinator(consumerGroup) + + if coordinator == nil { + if err := client.RefreshCoordinator(consumerGroup); err != nil { + return nil, err + } + coordinator = client.cachedCoordinator(consumerGroup) + } + + if coordinator == nil { + return nil, ErrConsumerCoordinatorNotAvailable + } + + _ = coordinator.Open(client.conf) + return coordinator, nil +} + +func (client *client) RefreshCoordinator(consumerGroup string) error { + if client.Closed() { + return ErrClosedClient + } + + response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max) + if err != nil { + return err + } + + client.lock.Lock() + defer client.lock.Unlock() + client.registerBroker(response.Coordinator) + client.coordinators[consumerGroup] = response.Coordinator.ID() + return nil +} + +// private broker management helpers + +// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered +// in the brokers map. It returns the broker that is registered, which may be the provided broker, +// or a previously registered Broker instance. You must hold the write lock before calling this function. +func (client *client) registerBroker(broker *Broker) { + if client.brokers[broker.ID()] == nil { + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr()) + } else if broker.Addr() != client.brokers[broker.ID()].Addr() { + safeAsyncClose(client.brokers[broker.ID()]) + client.brokers[broker.ID()] = broker + Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr()) + } +} + +// deregisterBroker removes a broker from the seedsBroker list, and if it's +// not the seedbroker, removes it from brokers map completely. 
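+// (The current head of the seed list is parked in deadSeeds so that
+// resurrectDeadBrokers can bring it back later; any other broker is dropped
+// from the brokers map outright.)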
+func (client *client) deregisterBroker(broker *Broker) { + client.lock.Lock() + defer client.lock.Unlock() + + if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] { + client.deadSeeds = append(client.deadSeeds, broker) + client.seedBrokers = client.seedBrokers[1:] + } else { + // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever, + // but we really shouldn't have to; once that loop is made better this case can be + // removed, and the function generally can be renamed from `deregisterBroker` to + // `nextSeedBroker` or something + Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr()) + delete(client.brokers, broker.ID()) + } +} + +func (client *client) resurrectDeadBrokers() { + client.lock.Lock() + defer client.lock.Unlock() + + Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds)) + client.seedBrokers = append(client.seedBrokers, client.deadSeeds...) + client.deadSeeds = nil +} + +func (client *client) any() *Broker { + client.lock.RLock() + defer client.lock.RUnlock() + + if len(client.seedBrokers) > 0 { + _ = client.seedBrokers[0].Open(client.conf) + return client.seedBrokers[0] + } + + // not guaranteed to be random *or* deterministic + for _, broker := range client.brokers { + _ = broker.Open(client.conf) + return broker + } + + return nil +} + +// private caching/lazy metadata helpers + +type partitionType int + +const ( + allPartitions partitionType = iota + writablePartitions + // If you add any more types, update the partition cache in update() + + // Ensure this is the last partition type value + maxPartitionIndex +) + +func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + return partitions[partitionID] + } + + return nil +} + +func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions, exists := client.cachedPartitionsResults[topic] + + if !exists { + return nil + } + return partitions[partitionSet] +} + +func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 { + partitions := client.metadata[topic] + + if partitions == nil { + return nil + } + + ret := make([]int32, 0, len(partitions)) + for _, partition := range partitions { + if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable { + continue + } + ret = append(ret, partition.ID) + } + + sort.Sort(int32Slice(ret)) + return ret +} + +func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) { + client.lock.RLock() + defer client.lock.RUnlock() + + partitions := client.metadata[topic] + if partitions != nil { + metadata, ok := partitions[partitionID] + if ok { + if metadata.Err == ErrLeaderNotAvailable { + return nil, ErrLeaderNotAvailable + } + b := client.brokers[metadata.Leader] + if b == nil { + return nil, ErrLeaderNotAvailable + } + _ = b.Open(client.conf) + return b, nil + } + } + + return nil, ErrUnknownTopicOrPartition +} + +func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) { + broker, err := client.Leader(topic, partitionID) + if err != nil { + return -1, err + } + + request := &OffsetRequest{} + if client.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 1 + } + request.AddBlock(topic, partitionID, time, 1) + + response, err 
:= broker.GetAvailableOffsets(request) + if err != nil { + _ = broker.Close() + return -1, err + } + + block := response.GetBlock(topic, partitionID) + if block == nil { + _ = broker.Close() + return -1, ErrIncompleteResponse + } + if block.Err != ErrNoError { + return -1, block.Err + } + if len(block.Offsets) != 1 { + return -1, ErrOffsetOutOfRange + } + + return block.Offsets[0], nil +} + +// core metadata update logic + +func (client *client) backgroundMetadataUpdater() { + defer close(client.closed) + + if client.conf.Metadata.RefreshFrequency == time.Duration(0) { + return + } + + ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency) + defer ticker.Stop() + + for { + select { + case <-ticker.C: + topics := []string{} + if !client.conf.Metadata.Full { + if specificTopics, err := client.Topics(); err != nil { + Logger.Println("Client background metadata topic load:", err) + break + } else { + topics = specificTopics + } + } + + if err := client.RefreshMetadata(topics...); err != nil { + Logger.Println("Client background metadata update:", err) + } + case <-client.closer: + return + } + } +} + +func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error { + retry := func(err error) error { + if attemptsRemaining > 0 { + Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining) + time.Sleep(client.conf.Metadata.Retry.Backoff) + return client.tryRefreshMetadata(topics, attemptsRemaining-1) + } + return err + } + + for broker := client.any(); broker != nil; broker = client.any() { + if len(topics) > 0 { + Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr) + } else { + Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr) + } + response, err := broker.GetMetadata(&MetadataRequest{Topics: topics}) + + switch err.(type) { + case nil: + // valid response, use it + shouldRetry, err := client.updateMetadata(response) + if shouldRetry { + Logger.Println("client/metadata found some partitions to be leaderless") + return retry(err) // note: err can be nil + } + return err + + case PacketEncodingError: + // didn't even send, return the error + return err + default: + // some other error, remove that broker and try again + Logger.Println("client/metadata got error from broker while fetching metadata:", err) + _ = broker.Close() + client.deregisterBroker(broker) + } + } + + Logger.Println("client/metadata no available broker to send metadata request to") + client.resurrectDeadBrokers() + return retry(ErrOutOfBrokers) +} + +// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable +func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) { + client.lock.Lock() + defer client.lock.Unlock() + + // For all the brokers we received: + // - if it is a new ID, save it + // - if it is an existing ID, but the address we have is stale, discard the old one and save it + // - otherwise ignore it, replacing our existing one would just bounce the connection + for _, broker := range data.Brokers { + client.registerBroker(broker) + } + + for _, topic := range data.Topics { + delete(client.metadata, topic.Name) + delete(client.cachedPartitionsResults, topic.Name) + + switch topic.Err { + case ErrNoError: + break + case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results + err = topic.Err + continue + 
case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
+			err = topic.Err
+			retry = true
+			continue
+		case ErrLeaderNotAvailable: // retry, but store partial partition results
+			retry = true
+			break
+		default: // don't retry, don't store partial results
+			Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
+			err = topic.Err
+			continue
+		}
+
+		client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
+		for _, partition := range topic.Partitions {
+			client.metadata[topic.Name][partition.ID] = partition
+			if partition.Err == ErrLeaderNotAvailable {
+				retry = true
+			}
+		}
+
+		var partitionCache [maxPartitionIndex][]int32
+		partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
+		partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
+		client.cachedPartitionsResults[topic.Name] = partitionCache
+	}
+
+	return
+}
+
+func (client *client) cachedCoordinator(consumerGroup string) *Broker {
+	client.lock.RLock()
+	defer client.lock.RUnlock()
+	if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
+		return client.brokers[coordinatorID]
+	}
+	return nil
+}
+
+func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
+	retry := func(err error) (*ConsumerMetadataResponse, error) {
+		if attemptsRemaining > 0 {
+			Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
+			time.Sleep(client.conf.Metadata.Retry.Backoff)
+			return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
+		}
+		return nil, err
+	}
+
+	for broker := client.any(); broker != nil; broker = client.any() {
+		Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
+
+		request := new(ConsumerMetadataRequest)
+		request.ConsumerGroup = consumerGroup
+
+		response, err := broker.GetConsumerMetadata(request)
+
+		if err != nil {
+			Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
+
+			switch err.(type) {
+			case PacketEncodingError:
+				return nil, err
+			default:
+				_ = broker.Close()
+				client.deregisterBroker(broker)
+				continue
+			}
+		}
+
+		switch response.Err {
+		case ErrNoError:
+			Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
+			return response, nil
+
+		case ErrConsumerCoordinatorNotAvailable:
+			Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
+
+			// This is very ugly, but this scenario will only happen once per cluster.
+			// The __consumer_offsets topic only has to be created one time.
+			// The number of partitions is not configurable, but partition 0 should always exist.
+			if _, err := client.Leader("__consumer_offsets", 0); err != nil {
+				Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
+				time.Sleep(2 * time.Second)
+			}
+
+			return retry(ErrConsumerCoordinatorNotAvailable)
+		default:
+			return nil, response.Err
+		}
+	}
+
+	Logger.Println("client/coordinator no available broker to send consumer metadata request to")
+	client.resurrectDeadBrokers()
+	return retry(ErrOutOfBrokers)
+}
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
new file mode 100644
index 0000000..e0774c6
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/config.go
@@ -0,0 +1,430 @@
+package sarama
+
+import (
+	"crypto/tls"
+	"regexp"
+	"time"
+
+	"github.com/rcrowley/go-metrics"
+)
+
+const defaultClientID = "sarama"
+
+var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
+
+// Config is used to pass multiple configuration options to Sarama's constructors.
+type Config struct {
+	// Net is the namespace for network-level properties used by the Broker, and
+	// shared by the Client/Producer/Consumer.
+	Net struct {
+		// How many outstanding requests a connection is allowed to have before
+		// sending on it blocks (default 5).
+		MaxOpenRequests int
+
+		// All three of the below configurations are similar to the
+		// `socket.timeout.ms` setting in JVM kafka. All of them default
+		// to 30 seconds.
+		DialTimeout  time.Duration // How long to wait for the initial connection.
+		ReadTimeout  time.Duration // How long to wait for a response.
+		WriteTimeout time.Duration // How long to wait for a transmit.
+
+		TLS struct {
+			// Whether or not to use TLS when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// The TLS configuration to use for secure connections if
+			// enabled (defaults to nil).
+			Config *tls.Config
+		}
+
+		// SASL based authentication with broker. While there are multiple SASL authentication methods,
+		// the current implementation is limited to plaintext (SASL/PLAIN) authentication.
+		SASL struct {
+			// Whether or not to use SASL authentication when connecting to the broker
+			// (defaults to false).
+			Enable bool
+			// Whether or not to send the Kafka SASL handshake first if enabled
+			// (defaults to true). You should only set this to false if you're using
+			// a non-Kafka SASL proxy.
+			Handshake bool
+			// Username and password for SASL/PLAIN authentication.
+			User     string
+			Password string
+		}
+
+		// KeepAlive specifies the keep-alive period for an active network connection.
+		// If zero, keep-alives are disabled. (default is 0: disabled).
+		KeepAlive time.Duration
+	}
+
+	// Metadata is the namespace for metadata management properties used by the
+	// Client, and shared by the Producer/Consumer.
+	Metadata struct {
+		Retry struct {
+			// The total number of times to retry a metadata request when the
+			// cluster is in the middle of a leader election (default 3).
+			Max int
+			// How long to wait for leader election to occur before retrying
+			// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
+			Backoff time.Duration
+		}
+		// How frequently to refresh the cluster metadata in the background.
+		// Defaults to 10 minutes. Set to 0 to disable. Similar to
+		// `topic.metadata.refresh.interval.ms` in the JVM version.
+		RefreshFrequency time.Duration
+
+		// Whether to maintain a full set of metadata for all topics, or just
+		// the minimal set that has been necessary so far. The full set is simpler
+		// and usually more convenient, but can take up a substantial amount of
+		// memory if you have many topics and partitions. Defaults to true.
+		Full bool
+	}
+
+	// Producer is the namespace for configuration related to producing messages,
+	// used by the Producer.
+	Producer struct {
+		// The maximum permitted size of a message (defaults to 1000000). Should be
+		// set equal to or smaller than the broker's `message.max.bytes`.
+		MaxMessageBytes int
+		// The level of acknowledgement reliability needed from the broker (defaults
+		// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
+		// JVM producer.
+		RequiredAcks RequiredAcks
+		// The maximum duration the broker will wait for the receipt of the number of
+		// RequiredAcks (defaults to 10 seconds). This is only relevant when
+		// RequiredAcks is set to WaitForAll or a number > 1. Only supports
+		// millisecond resolution, nanoseconds will be truncated. Equivalent to
+		// the JVM producer's `request.timeout.ms` setting.
+		Timeout time.Duration
+		// The type of compression to use on messages (defaults to no compression).
+		// Similar to `compression.codec` setting of the JVM producer.
+		Compression CompressionCodec
+		// Generates partitioners for choosing the partition to send messages to
+		// (defaults to hashing the message key). Similar to the `partitioner.class`
+		// setting for the JVM producer.
+		Partitioner PartitionerConstructor
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from the respective channels to prevent deadlock. If,
+		// however, this config is used to create a `SyncProducer`, both must be set
+		// to true and you shall not read from the channels since the producer does
+		// this internally.
+		Return struct {
+			// If enabled, successfully delivered messages will be returned on the
+			// Successes channel (default disabled).
+			Successes bool
+
+			// If enabled, messages that failed to deliver will be returned on the
+			// Errors channel, including the error (default enabled).
+			Errors bool
+		}
+
+		// The following config options control how often messages are batched up and
+		// sent to the broker. By default, messages are sent as fast as possible, and
+		// all messages received while the current batch is in-flight are placed
+		// into the subsequent batch.
+		Flush struct {
+			// The best-effort number of bytes needed to trigger a flush. Use the
+			// global sarama.MaxRequestSize to set a hard upper limit.
+			Bytes int
+			// The best-effort number of messages needed to trigger a flush. Use
+			// `MaxMessages` to set a hard upper limit.
+			Messages int
+			// The best-effort frequency of flushes. Equivalent to
+			// `queue.buffering.max.ms` setting of JVM producer.
+			Frequency time.Duration
+			// The maximum number of messages the producer will send in a single
+			// broker request. Defaults to 0 for unlimited. Similar to
+			// `queue.buffering.max.messages` in the JVM producer.
+			MaxMessages int
+		}
+
+		Retry struct {
+			// The total number of times to retry sending a message (default 3).
+			// Similar to the `message.send.max.retries` setting of the JVM producer.
+			Max int
+			// How long to wait for the cluster to settle between retries
+			// (default 100ms). Similar to the `retry.backoff.ms` setting of the
+			// JVM producer.
+			Backoff time.Duration
+		}
+	}
+
+	// Consumer is the namespace for configuration related to consuming messages,
+	// used by the Consumer.
+	//
+	// Note that Sarama's Consumer type does not currently support automatic
+	// consumer-group rebalancing and offset tracking. For Zookeeper-based
+	// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
+	// library builds on Sarama to add this support. For Kafka-based tracking
+	// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
+	// builds on Sarama to add this support.
+	Consumer struct {
+		Retry struct {
+			// How long to wait after failing to read from a partition before
+			// trying again (default 2s).
+			Backoff time.Duration
+		}
+
+		// Fetch is the namespace for controlling how many bytes are retrieved by any
+		// given request.
+		Fetch struct {
+			// The minimum number of message bytes to fetch in a request - the broker
+			// will wait until at least this many are available. The default is 1,
+			// as 0 causes the consumer to spin when no messages are available.
+			// Equivalent to the JVM's `fetch.min.bytes`.
+			Min int32
+			// The default number of message bytes to fetch from the broker in each
+			// request (default 32768). This should be larger than the majority of
+			// your messages, or else the consumer will spend a lot of time
+			// negotiating sizes and not actually consuming. Similar to the JVM's
+			// `fetch.message.max.bytes`.
+			Default int32
+			// The maximum number of message bytes to fetch from the broker in a
+			// single request. Messages larger than this will return
+			// ErrMessageTooLarge and will not be consumable, so you must be sure
+			// this is at least as large as your largest message. Defaults to 0
+			// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
+			// global `sarama.MaxResponseSize` still applies.
+			Max int32
+		}
+		// The maximum amount of time the broker will wait for Consumer.Fetch.Min
+		// bytes to become available before it returns fewer than that anyway. The
+		// default is 250ms, since 0 causes the consumer to spin when no events are
+		// available. 100-500ms is a reasonable range for most cases. Kafka only
+		// supports precision up to milliseconds; nanoseconds will be truncated.
+		// Equivalent to the JVM's `fetch.wait.max.ms`.
+		MaxWaitTime time.Duration
+
+		// The maximum amount of time the consumer expects a message to take to
+		// process for the user. If writing to the Messages channel takes longer than this,
+		// that partition will stop fetching more messages until it can proceed again.
+		// Note that, since the Messages channel is buffered, the actual grace time is
+		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
+		MaxProcessingTime time.Duration
+
+		// Return specifies what channels will be populated. If they are set to true,
+		// you must read from them to prevent deadlock.
+		Return struct {
+			// If enabled, any errors that occurred while consuming are returned on
+			// the Errors channel (default disabled).
+			Errors bool
+		}
+
+		// Offsets specifies configuration for how and when to commit consumed
+		// offsets. This currently requires the manual use of an OffsetManager
+		// but will eventually be automated.
+		Offsets struct {
+			// How frequently to commit updated offsets. Defaults to 1s.
+			CommitInterval time.Duration
+
+			// The initial offset to use if no offset was previously committed.
+			// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
+			Initial int64
+
+			// The retention duration for committed offsets. If zero, disabled
+			// (in which case the `offsets.retention.minutes` option on the
+			// broker will be used). Kafka only supports precision up to
+			// milliseconds; nanoseconds will be truncated. Requires Kafka
+			// broker version 0.9.0 or later.
+ // (default is 0: disabled). + Retention time.Duration + } + } + + // A user-provided string sent with every request to the brokers for logging, + // debugging, and auditing purposes. Defaults to "sarama", but you should + // probably set it to something specific to your application. + ClientID string + // The number of events to buffer in internal and external channels. This + // permits the producer and consumer to continue processing some messages + // in the background while user code is working, greatly improving throughput. + // Defaults to 256. + ChannelBufferSize int + // The version of Kafka that Sarama will assume it is running against. + // Defaults to the oldest supported stable version. Since Kafka provides + // backwards-compatibility, setting it to a version older than you have + // will not break anything, although it may prevent you from using the + // latest features. Setting it to a version greater than you are actually + // running may lead to random breakage. + Version KafkaVersion + // The registry to define metrics into. + // Defaults to a local registry. + // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true" + // prior to starting Sarama. + // See Examples on how to use the metrics registry + MetricRegistry metrics.Registry +} + +// NewConfig returns a new configuration instance with sane defaults. +func NewConfig() *Config { + c := &Config{} + + c.Net.MaxOpenRequests = 5 + c.Net.DialTimeout = 30 * time.Second + c.Net.ReadTimeout = 30 * time.Second + c.Net.WriteTimeout = 30 * time.Second + c.Net.SASL.Handshake = true + + c.Metadata.Retry.Max = 3 + c.Metadata.Retry.Backoff = 250 * time.Millisecond + c.Metadata.RefreshFrequency = 10 * time.Minute + c.Metadata.Full = true + + c.Producer.MaxMessageBytes = 1000000 + c.Producer.RequiredAcks = WaitForLocal + c.Producer.Timeout = 10 * time.Second + c.Producer.Partitioner = NewHashPartitioner + c.Producer.Retry.Max = 3 + c.Producer.Retry.Backoff = 100 * time.Millisecond + c.Producer.Return.Errors = true + + c.Consumer.Fetch.Min = 1 + c.Consumer.Fetch.Default = 32768 + c.Consumer.Retry.Backoff = 2 * time.Second + c.Consumer.MaxWaitTime = 250 * time.Millisecond + c.Consumer.MaxProcessingTime = 100 * time.Millisecond + c.Consumer.Return.Errors = false + c.Consumer.Offsets.CommitInterval = 1 * time.Second + c.Consumer.Offsets.Initial = OffsetNewest + + c.ClientID = defaultClientID + c.ChannelBufferSize = 256 + c.Version = minVersion + c.MetricRegistry = metrics.NewRegistry() + + return c +} + +// Validate checks a Config instance. It will return a +// ConfigurationError if the specified values don't make sense. 
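For orientation, a config built on these defaults is typically constructed, adjusted, and then validated before being handed to a constructor. A minimal sketch of that flow; the broker address, client ID, and field choices below are illustrative, not part of this patch:

	package main

	import (
		"log"
		"time"

		"github.com/Shopify/sarama"
	)

	func main() {
		config := sarama.NewConfig()
		config.ClientID = "my-app" // Validate() warns if the default "sarama" is left in place
		config.Net.DialTimeout = 10 * time.Second
		config.Producer.Return.Successes = true // required if this config backs a SyncProducer

		// NewClient validates the config itself, but checking up front gives a clearer failure.
		if err := config.Validate(); err != nil {
			log.Fatal(err) // a ConfigurationError naming the offending field
		}

		client, err := sarama.NewClient([]string{"localhost:9092"}, config)
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()
	}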
+func (c *Config) Validate() error { + // some configuration values should be warned on but not fail completely, do those first + if c.Net.TLS.Enable == false && c.Net.TLS.Config != nil { + Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.") + } + if c.Net.SASL.Enable == false { + if c.Net.SASL.User != "" { + Logger.Println("Net.SASL is disabled but a non-empty username was provided.") + } + if c.Net.SASL.Password != "" { + Logger.Println("Net.SASL is disabled but a non-empty password was provided.") + } + } + if c.Producer.RequiredAcks > 1 { + Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.") + } + if c.Producer.MaxMessageBytes >= int(MaxRequestSize) { + Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.") + } + if c.Producer.Flush.Bytes >= int(MaxRequestSize) { + Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.") + } + if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 { + Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.") + } + if c.Producer.Timeout%time.Millisecond != 0 { + Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.") + } + if c.Consumer.MaxWaitTime < 100*time.Millisecond { + Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.") + } + if c.Consumer.MaxWaitTime%time.Millisecond != 0 { + Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.") + } + if c.Consumer.Offsets.Retention%time.Millisecond != 0 { + Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.") + } + if c.ClientID == defaultClientID { + Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.") + } + + // validate Net values + switch { + case c.Net.MaxOpenRequests <= 0: + return ConfigurationError("Net.MaxOpenRequests must be > 0") + case c.Net.DialTimeout <= 0: + return ConfigurationError("Net.DialTimeout must be > 0") + case c.Net.ReadTimeout <= 0: + return ConfigurationError("Net.ReadTimeout must be > 0") + case c.Net.WriteTimeout <= 0: + return ConfigurationError("Net.WriteTimeout must be > 0") + case c.Net.KeepAlive < 0: + return ConfigurationError("Net.KeepAlive must be >= 0") + case c.Net.SASL.Enable == true && c.Net.SASL.User == "": + return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled") + case c.Net.SASL.Enable == true && c.Net.SASL.Password == "": + return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled") + } + + // validate the Metadata values + switch { + case c.Metadata.Retry.Max < 0: + return ConfigurationError("Metadata.Retry.Max must be >= 0") + case c.Metadata.Retry.Backoff < 0: + return ConfigurationError("Metadata.Retry.Backoff must be >= 0") + case c.Metadata.RefreshFrequency < 0: + return ConfigurationError("Metadata.RefreshFrequency must be >= 0") + } + + // validate the Producer values + switch { + case c.Producer.MaxMessageBytes <= 0: + return ConfigurationError("Producer.MaxMessageBytes must be > 0") + case c.Producer.RequiredAcks < -1: + return ConfigurationError("Producer.RequiredAcks must be >= -1") + case c.Producer.Timeout <= 0: + return 
ConfigurationError("Producer.Timeout must be > 0") + case c.Producer.Partitioner == nil: + return ConfigurationError("Producer.Partitioner must not be nil") + case c.Producer.Flush.Bytes < 0: + return ConfigurationError("Producer.Flush.Bytes must be >= 0") + case c.Producer.Flush.Messages < 0: + return ConfigurationError("Producer.Flush.Messages must be >= 0") + case c.Producer.Flush.Frequency < 0: + return ConfigurationError("Producer.Flush.Frequency must be >= 0") + case c.Producer.Flush.MaxMessages < 0: + return ConfigurationError("Producer.Flush.MaxMessages must be >= 0") + case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages: + return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set") + case c.Producer.Retry.Max < 0: + return ConfigurationError("Producer.Retry.Max must be >= 0") + case c.Producer.Retry.Backoff < 0: + return ConfigurationError("Producer.Retry.Backoff must be >= 0") + } + + if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) { + return ConfigurationError("lz4 compression requires Version >= V0_10_0_0") + } + + // validate the Consumer values + switch { + case c.Consumer.Fetch.Min <= 0: + return ConfigurationError("Consumer.Fetch.Min must be > 0") + case c.Consumer.Fetch.Default <= 0: + return ConfigurationError("Consumer.Fetch.Default must be > 0") + case c.Consumer.Fetch.Max < 0: + return ConfigurationError("Consumer.Fetch.Max must be >= 0") + case c.Consumer.MaxWaitTime < 1*time.Millisecond: + return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms") + case c.Consumer.MaxProcessingTime <= 0: + return ConfigurationError("Consumer.MaxProcessingTime must be > 0") + case c.Consumer.Retry.Backoff < 0: + return ConfigurationError("Consumer.Retry.Backoff must be >= 0") + case c.Consumer.Offsets.CommitInterval <= 0: + return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0") + case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest: + return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest") + + } + + // validate misc shared values + switch { + case c.ChannelBufferSize < 0: + return ConfigurationError("ChannelBufferSize must be >= 0") + case !validID.MatchString(c.ClientID): + return ConfigurationError("ClientID is invalid") + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go new file mode 100644 index 0000000..9a5650b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer.go @@ -0,0 +1,747 @@ +package sarama + +import ( + "errors" + "fmt" + "sync" + "sync/atomic" + "time" +) + +// ConsumerMessage encapsulates a Kafka message returned by the consumer. +type ConsumerMessage struct { + Key, Value []byte + Topic string + Partition int32 + Offset int64 + Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp + BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp +} + +// ConsumerError is what is provided to the user when an error occurs. +// It wraps an error and includes the topic and partition. +type ConsumerError struct { + Topic string + Partition int32 + Err error +} + +func (ce ConsumerError) Error() string { + return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err) +} + +// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface. 
+// It can be returned from the PartitionConsumer's Close methods to avoid the need to manually drain errors
+// when stopping.
+type ConsumerErrors []*ConsumerError
+
+func (ce ConsumerErrors) Error() string {
+	return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
+}
+
+// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
+// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
+// scope.
+//
+// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
+// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
+// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
+// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
+type Consumer interface {
+
+	// Topics returns the set of available topics as retrieved from the cluster
+	// metadata. This method is the same as Client.Topics(), and is provided for
+	// convenience.
+	Topics() ([]string, error)
+
+	// Partitions returns the sorted list of all partition IDs for the given topic.
+	// This method is the same as Client.Partitions(), and is provided for convenience.
+	Partitions(topic string) ([]int32, error)
+
+	// ConsumePartition creates a PartitionConsumer on the given topic/partition with
+	// the given offset. It will return an error if this Consumer is already consuming
+	// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
+	// or OffsetOldest.
+	ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
+
+	// HighWaterMarks returns the current high water marks for each topic and partition.
+	// Consistency between partitions is not guaranteed since high water marks are updated separately.
+	HighWaterMarks() map[string]map[int32]int64
+
+	// Close shuts down the consumer. It must be called after all child
+	// PartitionConsumers have already been closed.
+	Close() error
+}
+
+type consumer struct {
+	client    Client
+	conf      *Config
+	ownClient bool
+
+	lock            sync.Mutex
+	children        map[string]map[int32]*partitionConsumer
+	brokerConsumers map[*Broker]*brokerConsumer
+}
+
+// NewConsumer creates a new consumer using the given broker addresses and configuration.
+func NewConsumer(addrs []string, config *Config) (Consumer, error) {
+	client, err := NewClient(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+
+	c, err := NewConsumerFromClient(client)
+	if err != nil {
+		return nil, err
+	}
+	c.(*consumer).ownClient = true
+	return c, nil
+}
+
+// NewConsumerFromClient creates a new consumer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this consumer.
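A minimal sketch of the for/range consumption pattern these docs describe; the broker address, topic, and partition here are illustrative:

	package main

	import (
		"log"

		"github.com/Shopify/sarama"
	)

	func main() {
		consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, sarama.NewConfig())
		if err != nil {
			log.Fatal(err)
		}
		defer consumer.Close()

		// Consume one partition from the newest offset onwards.
		pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetNewest)
		if err != nil {
			log.Fatal(err)
		}
		defer pc.Close()

		// This loop runs until AsyncClose/Close is called from elsewhere or the
		// consumed offset goes out of range; see the PartitionConsumer docs below.
		for msg := range pc.Messages() {
			log.Printf("offset %d: %s", msg.Offset, msg.Value)
		}
	}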
+func NewConsumerFromClient(client Client) (Consumer, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + c := &consumer{ + client: client, + conf: client.Config(), + children: make(map[string]map[int32]*partitionConsumer), + brokerConsumers: make(map[*Broker]*brokerConsumer), + } + + return c, nil +} + +func (c *consumer) Close() error { + if c.ownClient { + return c.client.Close() + } + return nil +} + +func (c *consumer) Topics() ([]string, error) { + return c.client.Topics() +} + +func (c *consumer) Partitions(topic string) ([]int32, error) { + return c.client.Partitions(topic) +} + +func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) { + child := &partitionConsumer{ + consumer: c, + conf: c.conf, + topic: topic, + partition: partition, + messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize), + errors: make(chan *ConsumerError, c.conf.ChannelBufferSize), + feeder: make(chan *FetchResponse, 1), + trigger: make(chan none, 1), + dying: make(chan none), + fetchSize: c.conf.Consumer.Fetch.Default, + } + + if err := child.chooseStartingOffset(offset); err != nil { + return nil, err + } + + var leader *Broker + var err error + if leader, err = c.client.Leader(child.topic, child.partition); err != nil { + return nil, err + } + + if err := c.addChild(child); err != nil { + return nil, err + } + + go withRecover(child.dispatcher) + go withRecover(child.responseFeeder) + + child.broker = c.refBrokerConsumer(leader) + child.broker.input <- child + + return child, nil +} + +func (c *consumer) HighWaterMarks() map[string]map[int32]int64 { + c.lock.Lock() + defer c.lock.Unlock() + + hwms := make(map[string]map[int32]int64) + for topic, p := range c.children { + hwm := make(map[int32]int64, len(p)) + for partition, pc := range p { + hwm[partition] = pc.HighWaterMarkOffset() + } + hwms[topic] = hwm + } + + return hwms +} + +func (c *consumer) addChild(child *partitionConsumer) error { + c.lock.Lock() + defer c.lock.Unlock() + + topicChildren := c.children[child.topic] + if topicChildren == nil { + topicChildren = make(map[int32]*partitionConsumer) + c.children[child.topic] = topicChildren + } + + if topicChildren[child.partition] != nil { + return ConfigurationError("That topic/partition is already being consumed") + } + + topicChildren[child.partition] = child + return nil +} + +func (c *consumer) removeChild(child *partitionConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.children[child.topic], child.partition) +} + +func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer { + c.lock.Lock() + defer c.lock.Unlock() + + bc := c.brokerConsumers[broker] + if bc == nil { + bc = c.newBrokerConsumer(broker) + c.brokerConsumers[broker] = bc + } + + bc.refs++ + + return bc +} + +func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + brokerWorker.refs-- + + if brokerWorker.refs == 0 { + close(brokerWorker.input) + if c.brokerConsumers[brokerWorker.broker] == brokerWorker { + delete(c.brokerConsumers, brokerWorker.broker) + } + } +} + +func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) { + c.lock.Lock() + defer c.lock.Unlock() + + delete(c.brokerConsumers, brokerWorker.broker) +} + +// PartitionConsumer + +// PartitionConsumer processes Kafka messages from a given topic and partition. 
You MUST call one of Close() or
+// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
+// of scope.
+//
+// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
+// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
+// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
+// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
+// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
+// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
+// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
+//
+// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
+// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
+// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
+// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
+// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
+type PartitionConsumer interface {
+
+	// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
+	// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
+	// function, or Close before a consumer object passes out of scope, as it will otherwise leak memory. You must call
+	// this before calling Close on the underlying client.
+	AsyncClose()
+
+	// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
+	// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
+	// the Messages channel when this function is called, you will be competing with Close for messages; consider
+	// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
+	// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
+	Close() error
+
+	// Messages returns the read channel for the messages that are returned by
+	// the broker.
+	Messages() <-chan *ConsumerMessage
+
+	// Errors returns a read channel of errors that occurred during consuming, if
+	// enabled. By default, errors are logged and not returned over this channel.
+	// If you want to implement any custom error handling, set your config's
+	// Consumer.Return.Errors setting to true, and read from this channel.
+	Errors() <-chan *ConsumerError
+
+	// HighWaterMarkOffset returns the high water mark offset of the partition,
+	// i.e. the offset that will be used for the next message that will be produced.
+	// You can use this to determine how far behind the processing is.
+ HighWaterMarkOffset() int64 +} + +type partitionConsumer struct { + highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG + consumer *consumer + conf *Config + topic string + partition int32 + + broker *brokerConsumer + messages chan *ConsumerMessage + errors chan *ConsumerError + feeder chan *FetchResponse + + trigger, dying chan none + responseResult error + + fetchSize int32 + offset int64 +} + +var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing + +func (child *partitionConsumer) sendError(err error) { + cErr := &ConsumerError{ + Topic: child.topic, + Partition: child.partition, + Err: err, + } + + if child.conf.Consumer.Return.Errors { + child.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (child *partitionConsumer) dispatcher() { + for range child.trigger { + select { + case <-child.dying: + close(child.trigger) + case <-time.After(child.conf.Consumer.Retry.Backoff): + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + child.broker = nil + } + + Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition) + if err := child.dispatch(); err != nil { + child.sendError(err) + child.trigger <- none{} + } + } + } + + if child.broker != nil { + child.consumer.unrefBrokerConsumer(child.broker) + } + child.consumer.removeChild(child) + close(child.feeder) +} + +func (child *partitionConsumer) dispatch() error { + if err := child.consumer.client.RefreshMetadata(child.topic); err != nil { + return err + } + + var leader *Broker + var err error + if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil { + return err + } + + child.broker = child.consumer.refBrokerConsumer(leader) + + child.broker.input <- child + + return nil +} + +func (child *partitionConsumer) chooseStartingOffset(offset int64) error { + newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest) + if err != nil { + return err + } + oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest) + if err != nil { + return err + } + + switch { + case offset == OffsetNewest: + child.offset = newestOffset + case offset == OffsetOldest: + child.offset = oldestOffset + case offset >= oldestOffset && offset <= newestOffset: + child.offset = offset + default: + return ErrOffsetOutOfRange + } + + return nil +} + +func (child *partitionConsumer) Messages() <-chan *ConsumerMessage { + return child.messages +} + +func (child *partitionConsumer) Errors() <-chan *ConsumerError { + return child.errors +} + +func (child *partitionConsumer) AsyncClose() { + // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes + // the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and + // 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will + // also just close itself) + close(child.dying) +} + +func (child *partitionConsumer) Close() error { + child.AsyncClose() + + go withRecover(func() { + for range child.messages { + // drain + } + }) + + var errors ConsumerErrors + for err := range child.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (child *partitionConsumer) HighWaterMarkOffset() int64 { + return atomic.LoadInt64(&child.highWaterMarkOffset) +} + +func (child 
*partitionConsumer) responseFeeder() { + var msgs []*ConsumerMessage + expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime) + expireTimedOut := false + +feederLoop: + for response := range child.feeder { + msgs, child.responseResult = child.parseResponse(response) + + for i, msg := range msgs { + if !expiryTimer.Stop() && !expireTimedOut { + // expiryTimer was expired; clear out the waiting msg + <-expiryTimer.C + } + expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime) + expireTimedOut = false + + select { + case child.messages <- msg: + case <-expiryTimer.C: + expireTimedOut = true + child.responseResult = errTimedOut + child.broker.acks.Done() + for _, msg = range msgs[i:] { + child.messages <- msg + } + child.broker.input <- child + continue feederLoop + } + } + + child.broker.acks.Done() + } + + close(child.messages) + close(child.errors) +} + +func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) { + block := response.GetBlock(child.topic, child.partition) + if block == nil { + return nil, ErrIncompleteResponse + } + + if block.Err != ErrNoError { + return nil, block.Err + } + + if len(block.MsgSet.Messages) == 0 { + // We got no messages. If we got a trailing one then we need to ask for more data. + // Otherwise we just poll again and wait for one to be produced... + if block.MsgSet.PartialTrailingMessage { + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max { + // we can't ask for more data, we've hit the configured limit + child.sendError(ErrMessageTooLarge) + child.offset++ // skip this one so we can keep processing future messages + } else { + child.fetchSize *= 2 + if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max { + child.fetchSize = child.conf.Consumer.Fetch.Max + } + } + } + + return nil, nil + } + + // we got messages, reset our fetch size in case it was increased for a previous request + child.fetchSize = child.conf.Consumer.Fetch.Default + atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset) + + incomplete := false + prelude := true + var messages []*ConsumerMessage + for _, msgBlock := range block.MsgSet.Messages { + + for _, msg := range msgBlock.Messages() { + offset := msg.Offset + if msg.Msg.Version >= 1 { + baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset + offset += baseOffset + } + if prelude && offset < child.offset { + continue + } + prelude = false + + if offset >= child.offset { + messages = append(messages, &ConsumerMessage{ + Topic: child.topic, + Partition: child.partition, + Key: msg.Msg.Key, + Value: msg.Msg.Value, + Offset: offset, + Timestamp: msg.Msg.Timestamp, + BlockTimestamp: msgBlock.Msg.Timestamp, + }) + child.offset = offset + 1 + } else { + incomplete = true + } + } + + } + + if incomplete || len(messages) == 0 { + return nil, ErrIncompleteResponse + } + return messages, nil +} + +// brokerConsumer + +type brokerConsumer struct { + consumer *consumer + broker *Broker + input chan *partitionConsumer + newSubscriptions chan []*partitionConsumer + wait chan none + subscriptions map[*partitionConsumer]none + acks sync.WaitGroup + refs int +} + +func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer { + bc := &brokerConsumer{ + consumer: c, + broker: broker, + input: make(chan *partitionConsumer), + newSubscriptions: make(chan []*partitionConsumer), + wait: make(chan none), + subscriptions: make(map[*partitionConsumer]none), + refs: 0, + } 
+
+	go withRecover(bc.subscriptionManager)
+	go withRecover(bc.subscriptionConsumer)
+
+	return bc
+}
+
+func (bc *brokerConsumer) subscriptionManager() {
+	var buffer []*partitionConsumer
+
+	// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
+	// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
+	// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
+	// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
+	// so the main goroutine can block waiting for work if it has none.
+	for {
+		if len(buffer) > 0 {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- buffer:
+				buffer = nil
+			case bc.wait <- none{}:
+			}
+		} else {
+			select {
+			case event, ok := <-bc.input:
+				if !ok {
+					goto done
+				}
+				buffer = append(buffer, event)
+			case bc.newSubscriptions <- nil:
+			}
+		}
+	}
+
+done:
+	close(bc.wait)
+	if len(buffer) > 0 {
+		bc.newSubscriptions <- buffer
+	}
+	close(bc.newSubscriptions)
+}
+
+func (bc *brokerConsumer) subscriptionConsumer() {
+	<-bc.wait // wait for our first piece of work
+
+	// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
+	for newSubscriptions := range bc.newSubscriptions {
+		bc.updateSubscriptions(newSubscriptions)
+
+		if len(bc.subscriptions) == 0 {
+			// We're about to be shut down or we're about to receive more subscriptions.
+			// Either way, the signal just hasn't propagated to our goroutine yet.
+			<-bc.wait
+			continue
+		}
+
+		response, err := bc.fetchNewMessages()
+
+		if err != nil {
+			Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
+			bc.abort(err)
+			return
+		}
+
+		bc.acks.Add(len(bc.subscriptions))
+		for child := range bc.subscriptions {
+			child.feeder <- response
+		}
+		bc.acks.Wait()
+		bc.handleResponses()
+	}
+}
+
+func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
+	for _, child := range newSubscriptions {
+		bc.subscriptions[child] = none{}
+		Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+	}
+
+	for child := range bc.subscriptions {
+		select {
+		case <-child.dying:
+			Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		default:
+			break
+		}
+	}
+}
+
+func (bc *brokerConsumer) handleResponses() {
+	// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
+	for child := range bc.subscriptions {
+		result := child.responseResult
+		child.responseResult = nil
+
+		switch result {
+		case nil:
+			break
+		case errTimedOut:
+			Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
+				bc.broker.ID(), child.topic, child.partition)
+			delete(bc.subscriptions, child)
+		case ErrOffsetOutOfRange:
+			// there's no point in retrying this; it will just fail the same way again,
+			// so shut it down and force the user to choose what to do
+			child.sendError(result)
+			Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
+			close(child.trigger)
+			delete(bc.subscriptions, child)
+		case ErrUnknownTopicOrPartition, 
ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable: + // not an error, but does need redispatching + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + default: + // dunno, tell the user and try redispatching + child.sendError(result) + Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n", + bc.broker.ID(), child.topic, child.partition, result) + child.trigger <- none{} + delete(bc.subscriptions, child) + } + } +} + +func (bc *brokerConsumer) abort(err error) { + bc.consumer.abandonBrokerConsumer(bc) + _ = bc.broker.Close() // we don't care about the error this might return, we already have one + + for child := range bc.subscriptions { + child.sendError(err) + child.trigger <- none{} + } + + for newSubscriptions := range bc.newSubscriptions { + if len(newSubscriptions) == 0 { + <-bc.wait + continue + } + for _, child := range newSubscriptions { + child.sendError(err) + child.trigger <- none{} + } + } +} + +func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) { + request := &FetchRequest{ + MinBytes: bc.consumer.conf.Consumer.Fetch.Min, + MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond), + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) { + request.Version = 2 + } + if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) { + request.Version = 3 + request.MaxBytes = MaxResponseSize + } + + for child := range bc.subscriptions { + request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize) + } + + return bc.broker.Fetch(request) +} diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go new file mode 100644 index 0000000..9d92d35 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_group_members.go @@ -0,0 +1,94 @@ +package sarama + +type ConsumerGroupMemberMetadata struct { + Version int16 + Topics []string + UserData []byte +} + +func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putStringArray(m.Topics); err != nil { + return err + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + if m.Topics, err = pd.getStringArray(); err != nil { + return + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +type ConsumerGroupMemberAssignment struct { + Version int16 + Topics map[string][]int32 + UserData []byte +} + +func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error { + pe.putInt16(m.Version) + + if err := pe.putArrayLength(len(m.Topics)); err != nil { + return err + } + + for topic, partitions := range m.Topics { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putInt32Array(partitions); err != nil { + return err + } + } + + if err := pe.putBytes(m.UserData); err != nil { + return err + } + + return nil +} + +func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) { + if m.Version, err = pd.getInt16(); err != nil { + return + } + + var topicLen int + if topicLen, err = pd.getArrayLength(); err != nil { + return + } + + m.Topics = make(map[string][]int32, topicLen) + for i := 0; i < topicLen; i++ { + var topic string + 
if topic, err = pd.getString(); err != nil { + return + } + if m.Topics[topic], err = pd.getInt32Array(); err != nil { + return + } + } + + if m.UserData, err = pd.getBytes(); err != nil { + return + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go new file mode 100644 index 0000000..483be33 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go @@ -0,0 +1,26 @@ +package sarama + +type ConsumerMetadataRequest struct { + ConsumerGroup string +} + +func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error { + return pe.putString(r.ConsumerGroup) +} + +func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) { + r.ConsumerGroup, err = pd.getString() + return err +} + +func (r *ConsumerMetadataRequest) key() int16 { + return 10 +} + +func (r *ConsumerMetadataRequest) version() int16 { + return 0 +} + +func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go new file mode 100644 index 0000000..6b9632b --- /dev/null +++ b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go @@ -0,0 +1,85 @@ +package sarama + +import ( + "net" + "strconv" +) + +type ConsumerMetadataResponse struct { + Err KError + Coordinator *Broker + CoordinatorID int32 // deprecated: use Coordinator.ID() + CoordinatorHost string // deprecated: use Coordinator.Addr() + CoordinatorPort int32 // deprecated: use Coordinator.Addr() +} + +func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(tmp) + + coordinator := new(Broker) + if err := coordinator.decode(pd); err != nil { + return err + } + if coordinator.addr == ":0" { + return nil + } + r.Coordinator = coordinator + + // this can all go away in 2.0, but we have to fill in deprecated fields to maintain + // backwards compatibility + host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) + if err != nil { + return err + } + port, err := strconv.ParseInt(portstr, 10, 32) + if err != nil { + return err + } + r.CoordinatorID = r.Coordinator.ID() + r.CoordinatorHost = host + r.CoordinatorPort = int32(port) + + return nil +} + +func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + if r.Coordinator != nil { + host, portstr, err := net.SplitHostPort(r.Coordinator.Addr()) + if err != nil { + return err + } + port, err := strconv.ParseInt(portstr, 10, 32) + if err != nil { + return err + } + pe.putInt32(r.Coordinator.ID()) + if err := pe.putString(host); err != nil { + return err + } + pe.putInt32(int32(port)) + return nil + } + pe.putInt32(r.CoordinatorID) + if err := pe.putString(r.CoordinatorHost); err != nil { + return err + } + pe.putInt32(r.CoordinatorPort) + return nil +} + +func (r *ConsumerMetadataResponse) key() int16 { + return 10 +} + +func (r *ConsumerMetadataResponse) version() int16 { + return 0 +} + +func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion { + return V0_8_2_0 +} diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go new file mode 100644 index 0000000..f4fde18 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/crc32_field.go @@ -0,0 +1,35 @@ +package sarama + +import ( + 
"encoding/binary" + "hash/crc32" +) + +// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s. +type crc32Field struct { + startOffset int +} + +func (c *crc32Field) saveOffset(in int) { + c.startOffset = in +} + +func (c *crc32Field) reserveLength() int { + return 4 +} + +func (c *crc32Field) run(curOffset int, buf []byte) error { + crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + binary.BigEndian.PutUint32(buf[c.startOffset:], crc) + return nil +} + +func (c *crc32Field) check(curOffset int, buf []byte) error { + crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset]) + + if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) { + return PacketDecodingError{"CRC didn't match"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go new file mode 100644 index 0000000..1fb3567 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_request.go @@ -0,0 +1,30 @@ +package sarama + +type DescribeGroupsRequest struct { + Groups []string +} + +func (r *DescribeGroupsRequest) encode(pe packetEncoder) error { + return pe.putStringArray(r.Groups) +} + +func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + r.Groups, err = pd.getStringArray() + return +} + +func (r *DescribeGroupsRequest) key() int16 { + return 15 +} + +func (r *DescribeGroupsRequest) version() int16 { + return 0 +} + +func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *DescribeGroupsRequest) AddGroup(group string) { + r.Groups = append(r.Groups, group) +} diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go new file mode 100644 index 0000000..542b3a9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/describe_groups_response.go @@ -0,0 +1,187 @@ +package sarama + +type DescribeGroupsResponse struct { + Groups []*GroupDescription +} + +func (r *DescribeGroupsResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + + for _, groupDescription := range r.Groups { + if err := groupDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Groups = make([]*GroupDescription, n) + for i := 0; i < n; i++ { + r.Groups[i] = new(GroupDescription) + if err := r.Groups[i].decode(pd); err != nil { + return err + } + } + + return nil +} + +func (r *DescribeGroupsResponse) key() int16 { + return 15 +} + +func (r *DescribeGroupsResponse) version() int16 { + return 0 +} + +func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +type GroupDescription struct { + Err KError + GroupId string + State string + ProtocolType string + Protocol string + Members map[string]*GroupMemberDescription +} + +func (gd *GroupDescription) encode(pe packetEncoder) error { + pe.putInt16(int16(gd.Err)) + + if err := pe.putString(gd.GroupId); err != nil { + return err + } + if err := pe.putString(gd.State); err != nil { + return err + } + if err := pe.putString(gd.ProtocolType); err != nil { + return err + } + if err := pe.putString(gd.Protocol); err != nil { + return err + } + + if err := pe.putArrayLength(len(gd.Members)); err != nil { + return err + } + + for memberId, 
groupMemberDescription := range gd.Members { + if err := pe.putString(memberId); err != nil { + return err + } + if err := groupMemberDescription.encode(pe); err != nil { + return err + } + } + + return nil +} + +func (gd *GroupDescription) decode(pd packetDecoder) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + gd.Err = KError(kerr) + + if gd.GroupId, err = pd.getString(); err != nil { + return + } + if gd.State, err = pd.getString(); err != nil { + return + } + if gd.ProtocolType, err = pd.getString(); err != nil { + return + } + if gd.Protocol, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + gd.Members = make(map[string]*GroupMemberDescription) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + gd.Members[memberId] = new(GroupMemberDescription) + if err := gd.Members[memberId].decode(pd); err != nil { + return err + } + } + + return nil +} + +type GroupMemberDescription struct { + ClientId string + ClientHost string + MemberMetadata []byte + MemberAssignment []byte +} + +func (gmd *GroupMemberDescription) encode(pe packetEncoder) error { + if err := pe.putString(gmd.ClientId); err != nil { + return err + } + if err := pe.putString(gmd.ClientHost); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberMetadata); err != nil { + return err + } + if err := pe.putBytes(gmd.MemberAssignment); err != nil { + return err + } + + return nil +} + +func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) { + if gmd.ClientId, err = pd.getString(); err != nil { + return + } + if gmd.ClientHost, err = pd.getString(); err != nil { + return + } + if gmd.MemberMetadata, err = pd.getBytes(); err != nil { + return + } + if gmd.MemberAssignment, err = pd.getBytes(); err != nil { + return + } + + return nil +} + +func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(gmd.MemberAssignment, assignment) + return assignment, err +} + +func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) { + metadata := new(ConsumerGroupMemberMetadata) + err := decode(gmd.MemberMetadata, metadata) + return metadata, err +} diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go new file mode 100644 index 0000000..7ce3bc0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/encoder_decoder.go @@ -0,0 +1,89 @@ +package sarama + +import ( + "fmt" + + "github.com/rcrowley/go-metrics" +) + +// Encoder is the interface that wraps the basic Encode method. +// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules. +type encoder interface { + encode(pe packetEncoder) error +} + +// Encode takes an Encoder and turns it into bytes while potentially recording metrics. 
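The encode helper that follows makes two passes over the same structure: a prepEncoder that only counts bytes, then a realEncoder that writes into a buffer sized by the first pass. A minimal sketch of that measure-then-write pattern; the types and names here are illustrative stand-ins, not the library's own:

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// writer is a stand-in for the packetEncoder role: the same payload is
	// encoded twice, against two different implementations.
	type writer interface{ putInt32(v int32) }

	// lengthPass is pass one: it only counts how many bytes would be written.
	type lengthPass struct{ n int }

	func (l *lengthPass) putInt32(int32) { l.n += 4 }

	// bytesPass is pass two: it writes into a pre-sized buffer.
	type bytesPass struct {
		buf []byte
		off int
	}

	func (b *bytesPass) putInt32(v int32) {
		binary.BigEndian.PutUint32(b.buf[b.off:], uint32(v))
		b.off += 4
	}

	func encodePair(a, b int32, w writer) { w.putInt32(a); w.putInt32(b) }

	func main() {
		var l lengthPass
		encodePair(7, 9, &l) // first pass: measure
		out := &bytesPass{buf: make([]byte, l.n)}
		encodePair(7, 9, out)         // second pass: fill the exactly-sized buffer
		fmt.Printf("% x\n", out.buf)  // 00 00 00 07 00 00 00 09
	}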
+func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) { + if e == nil { + return nil, nil + } + + var prepEnc prepEncoder + var realEnc realEncoder + + err := e.encode(&prepEnc) + if err != nil { + return nil, err + } + + if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) { + return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)} + } + + realEnc.raw = make([]byte, prepEnc.length) + realEnc.registry = metricRegistry + err = e.encode(&realEnc) + if err != nil { + return nil, err + } + + return realEnc.raw, nil +} + +// Decoder is the interface that wraps the basic Decode method. +// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules. +type decoder interface { + decode(pd packetDecoder) error +} + +type versionedDecoder interface { + decode(pd packetDecoder, version int16) error +} + +// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes, +// interpreted using Kafka's encoding rules. +func decode(buf []byte, in decoder) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} + +func versionedDecode(buf []byte, in versionedDecoder, version int16) error { + if buf == nil { + return nil + } + + helper := realDecoder{raw: buf} + err := in.decode(&helper, version) + if err != nil { + return err + } + + if helper.off != len(buf) { + return PacketDecodingError{"invalid length"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go new file mode 100644 index 0000000..e6800ed --- /dev/null +++ b/vendor/github.com/Shopify/sarama/errors.go @@ -0,0 +1,221 @@ +package sarama + +import ( + "errors" + "fmt" +) + +// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored +// or otherwise failed to respond. +var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)") + +// ErrClosedClient is the error returned when a method is called on a client that has been closed. +var ErrClosedClient = errors.New("kafka: tried to use a client that was closed") + +// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does +// not contain the expected information. +var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks") + +// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index +// (meaning one outside of the range [0...numPartitions-1]). +var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index") + +// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting. +var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated") + +// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected. +var ErrNotConnected = errors.New("kafka: broker not connected") + +// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected +// when requesting messages, since as an optimization the server is allowed to return a partial message at the end +// of the message set. 
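+// Editor's note (annotation, not upstream doc): sentinel errors like this one
+// are plain values and are compared directly; MessageSet.decode later in this
+// vendored package does exactly that with the error it gets back:
+//
+//	if err == ErrInsufficientData {
+//		ms.PartialTrailingMessage = true // expected for a truncated tail, not fatal
+//	}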
+var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected") + +// ErrShuttingDown is returned when a producer receives a message during shutdown. +var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down") + +// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max +var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max") + +// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example, +// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that. +type PacketEncodingError struct { + Info string +} + +func (err PacketEncodingError) Error() string { + return fmt.Sprintf("kafka: error encoding packet: %s", err.Info) +} + +// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response. +// This can be a bad CRC or length field, or any other invalid value. +type PacketDecodingError struct { + Info string +} + +func (err PacketDecodingError) Error() string { + return fmt.Sprintf("kafka: error decoding packet: %s", err.Info) +} + +// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer) +// when the specified configuration is invalid. +type ConfigurationError string + +func (err ConfigurationError) Error() string { + return "kafka: invalid configuration (" + string(err) + ")" +} + +// KError is the type of error that can be returned directly by the Kafka broker. +// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes +type KError int16 + +// Numeric error codes returned by the Kafka server. 
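+// Editor's note (annotation, not upstream doc): client-side failures surface
+// as the concrete error types above, while broker-side failures arrive as
+// KError values; a hypothetical way for a caller to tell them apart:
+//
+//	switch e := err.(type) {
+//	case KError:
+//		// numeric broker error code, e.g. e == ErrOffsetOutOfRange
+//	case PacketDecodingError:
+//		// malformed broker response; details in e.Info
+//	case ConfigurationError:
+//		// invalid client configuration
+//	}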
+const ( + ErrNoError KError = 0 + ErrUnknown KError = -1 + ErrOffsetOutOfRange KError = 1 + ErrInvalidMessage KError = 2 + ErrUnknownTopicOrPartition KError = 3 + ErrInvalidMessageSize KError = 4 + ErrLeaderNotAvailable KError = 5 + ErrNotLeaderForPartition KError = 6 + ErrRequestTimedOut KError = 7 + ErrBrokerNotAvailable KError = 8 + ErrReplicaNotAvailable KError = 9 + ErrMessageSizeTooLarge KError = 10 + ErrStaleControllerEpochCode KError = 11 + ErrOffsetMetadataTooLarge KError = 12 + ErrNetworkException KError = 13 + ErrOffsetsLoadInProgress KError = 14 + ErrConsumerCoordinatorNotAvailable KError = 15 + ErrNotCoordinatorForConsumer KError = 16 + ErrInvalidTopic KError = 17 + ErrMessageSetSizeTooLarge KError = 18 + ErrNotEnoughReplicas KError = 19 + ErrNotEnoughReplicasAfterAppend KError = 20 + ErrInvalidRequiredAcks KError = 21 + ErrIllegalGeneration KError = 22 + ErrInconsistentGroupProtocol KError = 23 + ErrInvalidGroupId KError = 24 + ErrUnknownMemberId KError = 25 + ErrInvalidSessionTimeout KError = 26 + ErrRebalanceInProgress KError = 27 + ErrInvalidCommitOffsetSize KError = 28 + ErrTopicAuthorizationFailed KError = 29 + ErrGroupAuthorizationFailed KError = 30 + ErrClusterAuthorizationFailed KError = 31 + ErrInvalidTimestamp KError = 32 + ErrUnsupportedSASLMechanism KError = 33 + ErrIllegalSASLState KError = 34 + ErrUnsupportedVersion KError = 35 + ErrTopicAlreadyExists KError = 36 + ErrInvalidPartitions KError = 37 + ErrInvalidReplicationFactor KError = 38 + ErrInvalidReplicaAssignment KError = 39 + ErrInvalidConfig KError = 40 + ErrNotController KError = 41 + ErrInvalidRequest KError = 42 + ErrUnsupportedForMessageFormat KError = 43 + ErrPolicyViolation KError = 44 +) + +func (err KError) Error() string { + // Error messages stolen/adapted from + // https://kafka.apache.org/protocol#protocol_error_codes + switch err { + case ErrNoError: + return "kafka server: Not an error, why are you printing me?" + case ErrUnknown: + return "kafka server: Unexpected (unknown?) server error." + case ErrOffsetOutOfRange: + return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition." + case ErrInvalidMessage: + return "kafka server: Message contents does not match its CRC." + case ErrUnknownTopicOrPartition: + return "kafka server: Request was for a topic or partition that does not exist on this broker." + case ErrInvalidMessageSize: + return "kafka server: The message has a negative size." + case ErrLeaderNotAvailable: + return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes." + case ErrNotLeaderForPartition: + return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date." + case ErrRequestTimedOut: + return "kafka server: Request exceeded the user-specified time limit in the request." + case ErrBrokerNotAvailable: + return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!" + case ErrReplicaNotAvailable: + return "kafka server: Replica information not available, one or more brokers are down." + case ErrMessageSizeTooLarge: + return "kafka server: Message was too large, server rejected it to avoid allocation error." + case ErrStaleControllerEpochCode: + return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)." 
+	case ErrOffsetMetadataTooLarge:
+		return "kafka server: Specified a string larger than the configured maximum for offset metadata."
+	case ErrNetworkException:
+		return "kafka server: The server disconnected before a response was received."
+	case ErrOffsetsLoadInProgress:
+		return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
+	case ErrConsumerCoordinatorNotAvailable:
+		return "kafka server: Offset's topic has not yet been created."
+	case ErrNotCoordinatorForConsumer:
+		return "kafka server: Request was for a consumer group that is not coordinated by this broker."
+	case ErrInvalidTopic:
+		return "kafka server: The request attempted to perform an operation on an invalid topic."
+	case ErrMessageSetSizeTooLarge:
+		return "kafka server: The request included a message batch larger than the configured segment size on the server."
+	case ErrNotEnoughReplicas:
+		return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
+	case ErrNotEnoughReplicasAfterAppend:
+		return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
+	case ErrInvalidRequiredAcks:
+		return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
+	case ErrIllegalGeneration:
+		return "kafka server: The provided generation id is not the current generation."
+	case ErrInconsistentGroupProtocol:
+		return "kafka server: The provided group protocol type is incompatible with the other members."
+	case ErrInvalidGroupId:
+		return "kafka server: The provided group id was empty."
+	case ErrUnknownMemberId:
+		return "kafka server: The provided member is not known in the current generation."
+	case ErrInvalidSessionTimeout:
+		return "kafka server: The provided session timeout is outside the allowed range."
+	case ErrRebalanceInProgress:
+		return "kafka server: A rebalance for the group is in progress. Please re-join the group."
+	case ErrInvalidCommitOffsetSize:
+		return "kafka server: The provided commit metadata was too large."
+	case ErrTopicAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this topic."
+	case ErrGroupAuthorizationFailed:
+		return "kafka server: The client is not authorized to access this group."
+	case ErrClusterAuthorizationFailed:
+		return "kafka server: The client is not authorized to send this request type."
+	case ErrInvalidTimestamp:
+		return "kafka server: The timestamp of the message is out of acceptable range."
+	case ErrUnsupportedSASLMechanism:
+		return "kafka server: The broker does not support the requested SASL mechanism."
+	case ErrIllegalSASLState:
+		return "kafka server: Request is not valid given the current SASL state."
+	case ErrUnsupportedVersion:
+		return "kafka server: The version of the API is not supported."
+	case ErrTopicAlreadyExists:
+		return "kafka server: Topic with this name already exists."
+	case ErrInvalidPartitions:
+		return "kafka server: Number of partitions is invalid."
+	case ErrInvalidReplicationFactor:
+		return "kafka server: Replication-factor is invalid."
+	case ErrInvalidReplicaAssignment:
+		return "kafka server: Replica assignment is invalid."
+	case ErrInvalidConfig:
+		return "kafka server: Configuration is invalid."
+	case ErrNotController:
+		return "kafka server: This is not the correct controller for this cluster."
+ case ErrInvalidRequest: + return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details." + case ErrUnsupportedForMessageFormat: + return "kafka server: The requested operation is not supported by the message format version." + case ErrPolicyViolation: + return "kafka server: Request parameters do not satisfy the configured policy." + } + + return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err) +} diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go new file mode 100644 index 0000000..65600e8 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_request.go @@ -0,0 +1,150 @@ +package sarama + +type fetchRequestBlock struct { + fetchOffset int64 + maxBytes int32 +} + +func (b *fetchRequestBlock) encode(pe packetEncoder) error { + pe.putInt64(b.fetchOffset) + pe.putInt32(b.maxBytes) + return nil +} + +func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) { + if b.fetchOffset, err = pd.getInt64(); err != nil { + return err + } + if b.maxBytes, err = pd.getInt32(); err != nil { + return err + } + return nil +} + +// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See +// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at +// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes +type FetchRequest struct { + MaxWaitTime int32 + MinBytes int32 + MaxBytes int32 + Version int16 + blocks map[string]map[int32]*fetchRequestBlock +} + +func (r *FetchRequest) encode(pe packetEncoder) (err error) { + pe.putInt32(-1) // replica ID is always -1 for clients + pe.putInt32(r.MaxWaitTime) + pe.putInt32(r.MinBytes) + if r.Version == 3 { + pe.putInt32(r.MaxBytes) + } + err = pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, blocks := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(blocks)) + if err != nil { + return err + } + for partition, block := range blocks { + pe.putInt32(partition) + err = block.encode(pe) + if err != nil { + return err + } + } + } + return nil +} + +func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if _, err = pd.getInt32(); err != nil { + return err + } + if r.MaxWaitTime, err = pd.getInt32(); err != nil { + return err + } + if r.MinBytes, err = pd.getInt32(); err != nil { + return err + } + if r.Version == 3 { + if r.MaxBytes, err = pd.getInt32(); err != nil { + return err + } + } + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + fetchBlock := &fetchRequestBlock{} + if err = fetchBlock.decode(pd); err != nil { + return err + } + r.blocks[topic][partition] = fetchBlock + } + } + return nil +} + +func (r *FetchRequest) key() int16 { + return 1 +} + +func (r *FetchRequest) version() int16 { + return 
r.Version +} + +func (r *FetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + case 3: + return V0_10_1_0 + default: + return minVersion + } +} + +func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*fetchRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*fetchRequestBlock) + } + + tmp := new(fetchRequestBlock) + tmp.maxBytes = maxBytes + tmp.fetchOffset = fetchOffset + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go new file mode 100644 index 0000000..b56b166 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/fetch_response.go @@ -0,0 +1,210 @@ +package sarama + +import "time" + +type FetchResponseBlock struct { + Err KError + HighWaterMarkOffset int64 + MsgSet MessageSet +} + +func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.HighWaterMarkOffset, err = pd.getInt64() + if err != nil { + return err + } + + msgSetSize, err := pd.getInt32() + if err != nil { + return err + } + + msgSetDecoder, err := pd.getSubset(int(msgSetSize)) + if err != nil { + return err + } + err = (&b.MsgSet).decode(msgSetDecoder) + + return err +} + +func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(b.Err)) + + pe.putInt64(b.HighWaterMarkOffset) + + pe.push(&lengthField{}) + err = b.MsgSet.encode(pe) + if err != nil { + return err + } + return pe.pop() +} + +type FetchResponse struct { + Blocks map[string]map[int32]*FetchResponseBlock + ThrottleTime time.Duration + Version int16 // v1 requires 0.9+, v2 requires 0.10+ +} + +func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.Version >= 1 { + throttle, err := pd.getInt32() + if err != nil { + return err + } + r.ThrottleTime = time.Duration(throttle) * time.Millisecond + } + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(FetchResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *FetchResponse) encode(pe packetEncoder) (err error) { + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + + err = pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + + for id, block := range partitions { + pe.putInt32(id) + err = block.encode(pe) + if err != nil { + return err + } + } + + } + return nil +} + +func (r *FetchResponse) key() int16 { + return 1 +} + +func (r *FetchResponse) version() int16 { + return r.Version +} + +func (r *FetchResponse) requiredVersion() KafkaVersion { + switch 
r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + default: + return minVersion + } +} + +func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *FetchResponse) AddError(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + partitions, ok := r.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + r.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + frb.Err = err +} + +func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*FetchResponseBlock) + } + partitions, ok := r.Blocks[topic] + if !ok { + partitions = make(map[int32]*FetchResponseBlock) + r.Blocks[topic] = partitions + } + frb, ok := partitions[partition] + if !ok { + frb = new(FetchResponseBlock) + partitions[partition] = frb + } + var kb []byte + var vb []byte + if key != nil { + kb, _ = key.Encode() + } + if value != nil { + vb, _ = value.Encode() + } + msg := &Message{Key: kb, Value: vb} + msgBlock := &MessageBlock{Msg: msg, Offset: offset} + frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock) +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go new file mode 100644 index 0000000..ce49c47 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_request.go @@ -0,0 +1,47 @@ +package sarama + +type HeartbeatRequest struct { + GroupId string + GenerationId int32 + MemberId string +} + +func (r *HeartbeatRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *HeartbeatRequest) key() int16 { + return 12 +} + +func (r *HeartbeatRequest) version() int16 { + return 0 +} + +func (r *HeartbeatRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go new file mode 100644 index 0000000..766f5fd --- /dev/null +++ b/vendor/github.com/Shopify/sarama/heartbeat_response.go @@ -0,0 +1,32 @@ +package sarama + +type HeartbeatResponse struct { + Err KError +} + +func (r *HeartbeatResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *HeartbeatResponse) key() int16 { + return 12 +} + +func (r *HeartbeatResponse) version() int16 { + return 0 +} + +func (r *HeartbeatResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go 
new file mode 100644 index 0000000..3a7ba17 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_request.go @@ -0,0 +1,143 @@ +package sarama + +type GroupProtocol struct { + Name string + Metadata []byte +} + +func (p *GroupProtocol) decode(pd packetDecoder) (err error) { + p.Name, err = pd.getString() + if err != nil { + return err + } + p.Metadata, err = pd.getBytes() + return err +} + +func (p *GroupProtocol) encode(pe packetEncoder) (err error) { + if err := pe.putString(p.Name); err != nil { + return err + } + if err := pe.putBytes(p.Metadata); err != nil { + return err + } + return nil +} + +type JoinGroupRequest struct { + GroupId string + SessionTimeout int32 + MemberId string + ProtocolType string + GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols + OrderedGroupProtocols []*GroupProtocol +} + +func (r *JoinGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + pe.putInt32(r.SessionTimeout) + if err := pe.putString(r.MemberId); err != nil { + return err + } + if err := pe.putString(r.ProtocolType); err != nil { + return err + } + + if len(r.GroupProtocols) > 0 { + if len(r.OrderedGroupProtocols) > 0 { + return PacketDecodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"} + } + + if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil { + return err + } + for name, metadata := range r.GroupProtocols { + if err := pe.putString(name); err != nil { + return err + } + if err := pe.putBytes(metadata); err != nil { + return err + } + } + } else { + if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil { + return err + } + for _, protocol := range r.OrderedGroupProtocols { + if err := protocol.encode(pe); err != nil { + return err + } + } + } + + return nil +} + +func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + + if r.SessionTimeout, err = pd.getInt32(); err != nil { + return + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + if r.ProtocolType, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupProtocols = make(map[string][]byte) + for i := 0; i < n; i++ { + protocol := &GroupProtocol{} + if err := protocol.decode(pd); err != nil { + return err + } + r.GroupProtocols[protocol.Name] = protocol.Metadata + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol) + } + + return nil +} + +func (r *JoinGroupRequest) key() int16 { + return 11 +} + +func (r *JoinGroupRequest) version() int16 { + return 0 +} + +func (r *JoinGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) { + r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{ + Name: name, + Metadata: metadata, + }) +} + +func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error { + bin, err := encode(metadata, nil) + if err != nil { + return err + } + + r.AddGroupProtocol(name, bin) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go new file mode 100644 index 0000000..6d35fe3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/join_group_response.go @@ -0,0 +1,115 @@ +package sarama + +type 
JoinGroupResponse struct { + Err KError + GenerationId int32 + GroupProtocol string + LeaderId string + MemberId string + Members map[string][]byte +} + +func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) { + members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members)) + for id, bin := range r.Members { + meta := new(ConsumerGroupMemberMetadata) + if err := decode(bin, meta); err != nil { + return nil, err + } + members[id] = *meta + } + return members, nil +} + +func (r *JoinGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.GroupProtocol); err != nil { + return err + } + if err := pe.putString(r.LeaderId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.Members)); err != nil { + return err + } + + for memberId, memberMetadata := range r.Members { + if err := pe.putString(memberId); err != nil { + return err + } + + if err := pe.putBytes(memberMetadata); err != nil { + return err + } + } + + return nil +} + +func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + + if r.GroupProtocol, err = pd.getString(); err != nil { + return + } + + if r.LeaderId, err = pd.getString(); err != nil { + return + } + + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Members = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + + memberMetadata, err := pd.getBytes() + if err != nil { + return err + } + + r.Members[memberId] = memberMetadata + } + + return nil +} + +func (r *JoinGroupResponse) key() int16 { + return 11 +} + +func (r *JoinGroupResponse) version() int16 { + return 0 +} + +func (r *JoinGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go new file mode 100644 index 0000000..e177427 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_request.go @@ -0,0 +1,40 @@ +package sarama + +type LeaveGroupRequest struct { + GroupId string + MemberId string +} + +func (r *LeaveGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + if err := pe.putString(r.MemberId); err != nil { + return err + } + + return nil +} + +func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + return nil +} + +func (r *LeaveGroupRequest) key() int16 { + return 13 +} + +func (r *LeaveGroupRequest) version() int16 { + return 0 +} + +func (r *LeaveGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go new file mode 100644 index 0000000..d60c626 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/leave_group_response.go @@ -0,0 +1,32 @@ +package sarama + +type LeaveGroupResponse struct { + Err KError +} + +func (r *LeaveGroupResponse) 
encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return nil +} + +func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + r.Err = KError(kerr) + + return nil +} + +func (r *LeaveGroupResponse) key() int16 { + return 13 +} + +func (r *LeaveGroupResponse) version() int16 { + return 0 +} + +func (r *LeaveGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go new file mode 100644 index 0000000..70078be --- /dev/null +++ b/vendor/github.com/Shopify/sarama/length_field.go @@ -0,0 +1,29 @@ +package sarama + +import "encoding/binary" + +// LengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths. +type lengthField struct { + startOffset int +} + +func (l *lengthField) saveOffset(in int) { + l.startOffset = in +} + +func (l *lengthField) reserveLength() int { + return 4 +} + +func (l *lengthField) run(curOffset int, buf []byte) error { + binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4)) + return nil +} + +func (l *lengthField) check(curOffset int, buf []byte) error { + if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) { + return PacketDecodingError{"length field invalid"} + } + + return nil +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go new file mode 100644 index 0000000..3b16abf --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_request.go @@ -0,0 +1,24 @@ +package sarama + +type ListGroupsRequest struct { +} + +func (r *ListGroupsRequest) encode(pe packetEncoder) error { + return nil +} + +func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) { + return nil +} + +func (r *ListGroupsRequest) key() int16 { + return 16 +} + +func (r *ListGroupsRequest) version() int16 { + return 0 +} + +func (r *ListGroupsRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go new file mode 100644 index 0000000..56115d4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/list_groups_response.go @@ -0,0 +1,69 @@ +package sarama + +type ListGroupsResponse struct { + Err KError + Groups map[string]string +} + +func (r *ListGroupsResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + + if err := pe.putArrayLength(len(r.Groups)); err != nil { + return err + } + for groupId, protocolType := range r.Groups { + if err := pe.putString(groupId); err != nil { + return err + } + if err := pe.putString(protocolType); err != nil { + return err + } + } + + return nil +} + +func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.Groups = make(map[string]string) + for i := 0; i < n; i++ { + groupId, err := pd.getString() + if err != nil { + return err + } + protocolType, err := pd.getString() + if err != nil { + return err + } + + r.Groups[groupId] = protocolType + } + + return nil +} + +func (r *ListGroupsResponse) key() int16 { + return 16 +} + +func (r *ListGroupsResponse) version() int16 { + return 0 +} + 
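+// exampleLeaveGroupRoundTrip is an editor's sketch, not upstream sarama code.
+// It illustrates the symmetry the helpers in encoder_decoder.go rely on: any
+// protocol body produced by the two-pass encode helper can be read back with
+// versionedDecode. It assumes it lives in package sarama (the helpers are
+// unexported); passing a nil registry skips metric recording.
+func exampleLeaveGroupRoundTrip() (*LeaveGroupRequest, error) {
+	req := &LeaveGroupRequest{GroupId: "my-group", MemberId: "member-1"}
+
+	raw, err := encode(req, nil) // sizing pass, then writing pass
+	if err != nil {
+		return nil, err
+	}
+
+	decoded := &LeaveGroupRequest{}
+	// versionedDecode also verifies that every byte of raw was consumed.
+	if err := versionedDecode(raw, decoded, req.version()); err != nil {
+		return nil, err
+	}
+	return decoded, nil
+}
+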
+func (r *ListGroupsResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go new file mode 100644 index 0000000..86b4ac3 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/message.go @@ -0,0 +1,212 @@ +package sarama + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + "time" + + "github.com/eapache/go-xerial-snappy" + "github.com/pierrec/lz4" +) + +// CompressionCodec represents the various compression codecs recognized by Kafka in messages. +type CompressionCodec int8 + +// only the last two bits are really used +const compressionCodecMask int8 = 0x03 + +const ( + CompressionNone CompressionCodec = 0 + CompressionGZIP CompressionCodec = 1 + CompressionSnappy CompressionCodec = 2 + CompressionLZ4 CompressionCodec = 3 +) + +type Message struct { + Codec CompressionCodec // codec used to compress the message contents + Key []byte // the message key, may be nil + Value []byte // the message contents + Set *MessageSet // the message set a message might wrap + Version int8 // v1 requires Kafka 0.10 + Timestamp time.Time // the timestamp of the message (version 1+ only) + + compressedCache []byte + compressedSize int // used for computing the compression ratio metrics +} + +func (m *Message) encode(pe packetEncoder) error { + pe.push(&crc32Field{}) + + pe.putInt8(m.Version) + + attributes := int8(m.Codec) & compressionCodecMask + pe.putInt8(attributes) + + if m.Version >= 1 { + timestamp := int64(-1) + + if !m.Timestamp.Before(time.Unix(0, 0)) { + timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond) + } else if !m.Timestamp.IsZero() { + return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)} + } + + pe.putInt64(timestamp) + } + + err := pe.putBytes(m.Key) + if err != nil { + return err + } + + var payload []byte + + if m.compressedCache != nil { + payload = m.compressedCache + m.compressedCache = nil + } else if m.Value != nil { + switch m.Codec { + case CompressionNone: + payload = m.Value + case CompressionGZIP: + var buf bytes.Buffer + writer := gzip.NewWriter(&buf) + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + case CompressionSnappy: + tmp := snappy.Encode(m.Value) + m.compressedCache = tmp + payload = m.compressedCache + case CompressionLZ4: + var buf bytes.Buffer + writer := lz4.NewWriter(&buf) + if _, err = writer.Write(m.Value); err != nil { + return err + } + if err = writer.Close(); err != nil { + return err + } + m.compressedCache = buf.Bytes() + payload = m.compressedCache + + default: + return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)} + } + // Keep in mind the compressed payload size for metric gathering + m.compressedSize = len(payload) + } + + if err = pe.putBytes(payload); err != nil { + return err + } + + return pe.pop() +} + +func (m *Message) decode(pd packetDecoder) (err error) { + err = pd.push(&crc32Field{}) + if err != nil { + return err + } + + m.Version, err = pd.getInt8() + if err != nil { + return err + } + + attribute, err := pd.getInt8() + if err != nil { + return err + } + m.Codec = CompressionCodec(attribute & compressionCodecMask) + + if m.Version >= 1 { + millis, err := pd.getInt64() + if err != nil { + return err + } + + // negative timestamps are invalid, in these cases we should return + // a zero time + timestamp := time.Time{} 
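+		// Editor's note: millis is the broker-supplied epoch timestamp in
+		// milliseconds, so e.g. millis = 1500000000123 becomes
+		// time.Unix(1500000000, 123000000). Negative values mean "no
+		// timestamp" and leave the zero time.Time in place.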
+		if millis >= 0 {
+			timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
+		}
+
+		m.Timestamp = timestamp
+	}
+
+	m.Key, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	m.Value, err = pd.getBytes()
+	if err != nil {
+		return err
+	}
+
+	// Required for deep equal assertion during tests but might be useful
+	// for future metrics about the compression ratio in fetch requests
+	m.compressedSize = len(m.Value)
+
+	switch m.Codec {
+	case CompressionNone:
+		// nothing to do
+	case CompressionGZIP:
+		if m.Value == nil {
+			break
+		}
+		reader, err := gzip.NewReader(bytes.NewReader(m.Value))
+		if err != nil {
+			return err
+		}
+		if m.Value, err = ioutil.ReadAll(reader); err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+	case CompressionSnappy:
+		if m.Value == nil {
+			break
+		}
+		if m.Value, err = snappy.Decode(m.Value); err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+	case CompressionLZ4:
+		if m.Value == nil {
+			break
+		}
+		reader := lz4.NewReader(bytes.NewReader(m.Value))
+		if m.Value, err = ioutil.ReadAll(reader); err != nil {
+			return err
+		}
+		if err := m.decodeSet(); err != nil {
+			return err
+		}
+
+	default:
+		return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
+	}
+
+	return pd.pop()
+}
+
+// decodes a message set from a previously encoded bulk-message
+func (m *Message) decodeSet() (err error) {
+	pd := realDecoder{raw: m.Value}
+	m.Set = &MessageSet{}
+	return m.Set.decode(&pd)
+}
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
new file mode 100644
index 0000000..f028784
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/message_set.go
@@ -0,0 +1,89 @@
+package sarama
+
+type MessageBlock struct {
+	Offset int64
+	Msg *Message
+}
+
+// Messages is a convenience helper which returns all of the
+// messages that are wrapped in this block
+func (msb *MessageBlock) Messages() []*MessageBlock {
+	if msb.Msg.Set != nil {
+		return msb.Msg.Set.Messages
+	}
+	return []*MessageBlock{msb}
+}
+
+func (msb *MessageBlock) encode(pe packetEncoder) error {
+	pe.putInt64(msb.Offset)
+	pe.push(&lengthField{})
+	err := msb.Msg.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
+	if msb.Offset, err = pd.getInt64(); err != nil {
+		return err
+	}
+
+	if err = pd.push(&lengthField{}); err != nil {
+		return err
+	}
+
+	msb.Msg = new(Message)
+	if err = msb.Msg.decode(pd); err != nil {
+		return err
+	}
+
+	if err = pd.pop(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+type MessageSet struct {
+	PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
+	Messages []*MessageBlock
+}
+
+func (ms *MessageSet) encode(pe packetEncoder) error {
+	for i := range ms.Messages {
+		err := ms.Messages[i].encode(pe)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func (ms *MessageSet) decode(pd packetDecoder) (err error) {
+	ms.Messages = nil
+
+	for pd.remaining() > 0 {
+		msb := new(MessageBlock)
+		err = msb.decode(pd)
+		switch err {
+		case nil:
+			ms.Messages = append(ms.Messages, msb)
+		case ErrInsufficientData:
+			// As an optimization the server is allowed to return a partial message at the
+			// end of the message set. Clients should handle this case. So we just ignore such things.
+ ms.PartialTrailingMessage = true + return nil + default: + return err + } + } + + return nil +} + +func (ms *MessageSet) addMessage(msg *Message) { + block := new(MessageBlock) + block.Msg = msg + ms.Messages = append(ms.Messages, block) +} diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go new file mode 100644 index 0000000..9a26b55 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_request.go @@ -0,0 +1,52 @@ +package sarama + +type MetadataRequest struct { + Topics []string +} + +func (r *MetadataRequest) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + + for i := range r.Topics { + err = pe.putString(r.Topics[i]) + if err != nil { + return err + } + } + return nil +} + +func (r *MetadataRequest) decode(pd packetDecoder, version int16) error { + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + + r.Topics = make([]string, topicCount) + for i := range r.Topics { + topic, err := pd.getString() + if err != nil { + return err + } + r.Topics[i] = topic + } + return nil +} + +func (r *MetadataRequest) key() int16 { + return 3 +} + +func (r *MetadataRequest) version() int16 { + return 0 +} + +func (r *MetadataRequest) requiredVersion() KafkaVersion { + return minVersion +} diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go new file mode 100644 index 0000000..f9d6a42 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metadata_response.go @@ -0,0 +1,239 @@ +package sarama + +type PartitionMetadata struct { + Err KError + ID int32 + Leader int32 + Replicas []int32 + Isr []int32 +} + +func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + pm.Err = KError(tmp) + + pm.ID, err = pd.getInt32() + if err != nil { + return err + } + + pm.Leader, err = pd.getInt32() + if err != nil { + return err + } + + pm.Replicas, err = pd.getInt32Array() + if err != nil { + return err + } + + pm.Isr, err = pd.getInt32Array() + if err != nil { + return err + } + + return nil +} + +func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(pm.Err)) + pe.putInt32(pm.ID) + pe.putInt32(pm.Leader) + + err = pe.putInt32Array(pm.Replicas) + if err != nil { + return err + } + + err = pe.putInt32Array(pm.Isr) + if err != nil { + return err + } + + return nil +} + +type TopicMetadata struct { + Err KError + Name string + Partitions []*PartitionMetadata +} + +func (tm *TopicMetadata) decode(pd packetDecoder) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + tm.Err = KError(tmp) + + tm.Name, err = pd.getString() + if err != nil { + return err + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + tm.Partitions = make([]*PartitionMetadata, n) + for i := 0; i < n; i++ { + tm.Partitions[i] = new(PartitionMetadata) + err = tm.Partitions[i].decode(pd) + if err != nil { + return err + } + } + + return nil +} + +func (tm *TopicMetadata) encode(pe packetEncoder) (err error) { + pe.putInt16(int16(tm.Err)) + + err = pe.putString(tm.Name) + if err != nil { + return err + } + + err = pe.putArrayLength(len(tm.Partitions)) + if err != nil { + return err + } + + for _, pm := range tm.Partitions { + err = pm.encode(pe) + if err != nil { + return err + } + } + + return nil +} + +type MetadataResponse struct { + 
Brokers []*Broker + Topics []*TopicMetadata +} + +func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) { + n, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Brokers = make([]*Broker, n) + for i := 0; i < n; i++ { + r.Brokers[i] = new(Broker) + err = r.Brokers[i].decode(pd) + if err != nil { + return err + } + } + + n, err = pd.getArrayLength() + if err != nil { + return err + } + + r.Topics = make([]*TopicMetadata, n) + for i := 0; i < n; i++ { + r.Topics[i] = new(TopicMetadata) + err = r.Topics[i].decode(pd) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Brokers)) + if err != nil { + return err + } + for _, broker := range r.Brokers { + err = broker.encode(pe) + if err != nil { + return err + } + } + + err = pe.putArrayLength(len(r.Topics)) + if err != nil { + return err + } + for _, tm := range r.Topics { + err = tm.encode(pe) + if err != nil { + return err + } + } + + return nil +} + +func (r *MetadataResponse) key() int16 { + return 3 +} + +func (r *MetadataResponse) version() int16 { + return 0 +} + +func (r *MetadataResponse) requiredVersion() KafkaVersion { + return minVersion +} + +// testing API + +func (r *MetadataResponse) AddBroker(addr string, id int32) { + r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr}) +} + +func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata { + var tmatch *TopicMetadata + + for _, tm := range r.Topics { + if tm.Name == topic { + tmatch = tm + goto foundTopic + } + } + + tmatch = new(TopicMetadata) + tmatch.Name = topic + r.Topics = append(r.Topics, tmatch) + +foundTopic: + + tmatch.Err = err + return tmatch +} + +func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) { + tmatch := r.AddTopic(topic, ErrNoError) + var pmatch *PartitionMetadata + + for _, pm := range tmatch.Partitions { + if pm.ID == partition { + pmatch = pm + goto foundPartition + } + } + + pmatch = new(PartitionMetadata) + pmatch.ID = partition + tmatch.Partitions = append(tmatch.Partitions, pmatch) + +foundPartition: + + pmatch.Leader = brokerID + pmatch.Replicas = replicas + pmatch.Isr = isr + pmatch.Err = err + +} diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go new file mode 100644 index 0000000..4869708 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/metrics.go @@ -0,0 +1,51 @@ +package sarama + +import ( + "fmt" + "strings" + + "github.com/rcrowley/go-metrics" +) + +// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library: +// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution, +// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements. 
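+// Editor's note (annotation, not upstream doc): together with the naming
+// helpers below, broker ID 1 and topic "my.topic" would yield names such as
+// "request-rate-for-broker-1" and "record-send-rate-for-topic-my_topic"
+// (dots in topic names are rewritten to underscores).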
+// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
+const (
+	metricsReservoirSize = 1028
+	metricsAlphaFactor = 0.015
+)
+
+func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
+	return r.GetOrRegister(name, func() metrics.Histogram {
+		return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
+	}).(metrics.Histogram)
+}
+
+func getMetricNameForBroker(name string, broker *Broker) string {
+	// Use the broker ID like the Java client, as it does not contain '.' or ':' characters that
+	// can be interpreted as special characters by monitoring tools (e.g. Graphite)
+	return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
+}
+
+func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
+}
+
+func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
+}
+
+func getMetricNameForTopic(name string, topic string) string {
+	// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
+	// cf. KAFKA-1902 and KAFKA-2337
+	return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
+}
+
+func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
+	return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
+}
+
+func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
+	return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
+}
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
new file mode 100644
index 0000000..0734d34
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockbroker.go
@@ -0,0 +1,324 @@
+package sarama
+
+import (
+	"bytes"
+	"encoding/binary"
+	"fmt"
+	"io"
+	"net"
+	"reflect"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/davecgh/go-spew/spew"
+)
+
+const (
+	expectationTimeout = 500 * time.Millisecond
+)
+
+type requestHandlerFunc func(req *request) (res encoder)
+
+// RequestNotifierFunc is invoked when a mock broker processes a request successfully
+// and provides the number of bytes read and written.
+type RequestNotifierFunc func(bytesRead, bytesWritten int)
+
+// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
+// to facilitate testing of higher level or specialized consumers and producers
+// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
+// but rather provides a facility to do that. It takes care of the TCP
+// transport, request unmarshaling, and response marshaling, and makes it the
+// test writer's responsibility to program MockBroker behaviour that is correct
+// according to the Kafka API protocol.
+//
+// MockBroker is implemented as a TCP server listening on a kernel-selected
+// localhost port that can accept many connections. It reads Kafka requests
+// from that connection and returns responses programmed by the SetHandlerByMap
+// function. If a MockBroker receives a request that it has no programmed
+// response for, then it returns nothing and the request times out.
+//
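+// Editor's sketch (hypothetical test wiring, not part of the upstream doc):
+//
+//	broker := NewMockBroker(t, 1)
+//	defer broker.Close()
+//	broker.SetHandlerByMap(map[string]MockResponse{
+//		"MetadataRequest": NewMockMetadataResponse(t).
+//			SetBroker(broker.Addr(), broker.BrokerID()),
+//	})
+//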
+// A set of MockRequest builders to define mappings used by MockBroker is
+// provided by Sarama. But users can develop MockRequests of their own and use
+// them along with or instead of the standard ones.
+//
+// When running tests with MockBroker it is strongly recommended to specify
+// a timeout to `go test` so that if the broker hangs waiting for a response,
+// the test panics.
+//
+// It is not necessary to prefix message length or correlation ID to your
+// response bytes, the server does that automatically as a convenience.
+type MockBroker struct {
+	brokerID int32
+	port int32
+	closing chan none
+	stopper chan none
+	expectations chan encoder
+	listener net.Listener
+	t TestReporter
+	latency time.Duration
+	handler requestHandlerFunc
+	notifier RequestNotifierFunc
+	history []RequestResponse
+	lock sync.Mutex
+}
+
+// RequestResponse represents a Request/Response pair processed by MockBroker.
+type RequestResponse struct {
+	Request protocolBody
+	Response encoder
+}
+
+// SetLatency makes the broker pause for the specified period every time before
+// replying.
+func (b *MockBroker) SetLatency(latency time.Duration) {
+	b.latency = latency
+}
+
+// SetHandlerByMap defines the mapping of Request types to MockResponses. When a
+// request is received by the broker, it looks up the request type in the map
+// and uses the found MockResponse instance to generate an appropriate reply.
+// If the request type is not found in the map then nothing is sent.
+func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
+	b.setHandler(func(req *request) (res encoder) {
+		reqTypeName := reflect.TypeOf(req.body).Elem().Name()
+		mockResponse := handlerMap[reqTypeName]
+		if mockResponse == nil {
+			return nil
+		}
+		return mockResponse.For(req.body)
+	})
+}
+
+// SetNotifier sets a function that will get invoked whenever a request has been
+// processed successfully and will provide the number of bytes read and written.
+func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
+	b.lock.Lock()
+	b.notifier = notifier
+	b.lock.Unlock()
+}
+
+// BrokerID returns the broker ID assigned to the broker.
+func (b *MockBroker) BrokerID() int32 {
+	return b.brokerID
+}
+
+// History returns a slice of RequestResponse pairs in the order they were
+// processed by the broker. Note that in case of multiple connections to the
+// broker the order expected by a test can be different from the order recorded
+// in the history, unless some synchronization is implemented in the test.
+func (b *MockBroker) History() []RequestResponse {
+	b.lock.Lock()
+	history := make([]RequestResponse, len(b.history))
+	copy(history, b.history)
+	b.lock.Unlock()
+	return history
+}
+
+// Port returns the TCP port number the broker is listening for requests on.
+func (b *MockBroker) Port() int32 {
+	return b.port
+}
+
+// Addr returns the broker connection string in the form "<addr>:<port>".
:". +func (b *MockBroker) Addr() string { + return b.listener.Addr().String() +} + +// Close terminates the broker blocking until it stops internal goroutines and +// releases all resources. +func (b *MockBroker) Close() { + close(b.expectations) + if len(b.expectations) > 0 { + buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID())) + for e := range b.expectations { + _, _ = buf.WriteString(spew.Sdump(e)) + } + b.t.Error(buf.String()) + } + close(b.closing) + <-b.stopper +} + +// setHandler sets the specified function as the request handler. Whenever +// a mock broker reads a request from the wire it passes the request to the +// function and sends back whatever the handler function returns. +func (b *MockBroker) setHandler(handler requestHandlerFunc) { + b.lock.Lock() + b.handler = handler + b.lock.Unlock() +} + +func (b *MockBroker) serverLoop() { + defer close(b.stopper) + var err error + var conn net.Conn + + go func() { + <-b.closing + err := b.listener.Close() + if err != nil { + b.t.Error(err) + } + }() + + wg := &sync.WaitGroup{} + i := 0 + for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() { + wg.Add(1) + go b.handleRequests(conn, i, wg) + i++ + } + wg.Wait() + Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err) +} + +func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) { + defer wg.Done() + defer func() { + _ = conn.Close() + }() + Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx) + var err error + + abort := make(chan none) + defer close(abort) + go func() { + select { + case <-b.closing: + _ = conn.Close() + case <-abort: + } + }() + + resHeader := make([]byte, 8) + for { + req, bytesRead, err := decodeRequest(conn) + if err != nil { + Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req)) + b.serverError(err) + break + } + + if b.latency > 0 { + time.Sleep(b.latency) + } + + b.lock.Lock() + res := b.handler(req) + b.history = append(b.history, RequestResponse{req.body, res}) + b.lock.Unlock() + + if res == nil { + Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req)) + continue + } + Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res) + + encodedRes, err := encode(res, nil) + if err != nil { + b.serverError(err) + break + } + if len(encodedRes) == 0 { + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, 0) + } + b.lock.Unlock() + continue + } + + binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4)) + binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID)) + if _, err = conn.Write(resHeader); err != nil { + b.serverError(err) + break + } + if _, err = conn.Write(encodedRes); err != nil { + b.serverError(err) + break + } + + b.lock.Lock() + if b.notifier != nil { + b.notifier(bytesRead, len(resHeader)+len(encodedRes)) + } + b.lock.Unlock() + } + Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err) +} + +func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) { + select { + case res, ok := <-b.expectations: + if !ok { + return nil + } + return res + case <-time.After(expectationTimeout): + return nil + } +} + +func (b *MockBroker) serverError(err error) { + isConnectionClosedError := false + if _, ok := err.(*net.OpError); ok { + isConnectionClosedError = true + } else if err == io.EOF { + 
+		isConnectionClosedError = true
+	} else if err.Error() == "use of closed network connection" {
+		isConnectionClosedError = true
+	}
+
+	if isConnectionClosedError {
+		return
+	}
+
+	b.t.Errorf(err.Error())
+}
+
+// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
+// test framework and a channel of responses to use. If an error occurs, it is
+// simply logged to the TestReporter and the broker exits.
+func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
+	return NewMockBrokerAddr(t, brokerID, "localhost:0")
+}
+
+// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
+// it rather than just some ephemeral port.
+func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
+	var err error
+
+	broker := &MockBroker{
+		closing:      make(chan none),
+		stopper:      make(chan none),
+		t:            t,
+		brokerID:     brokerID,
+		expectations: make(chan encoder, 512),
+	}
+	broker.handler = broker.defaultRequestHandler
+
+	broker.listener, err = net.Listen("tcp", addr)
+	if err != nil {
+		t.Fatal(err)
+	}
+	Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
+	_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
+	if err != nil {
+		t.Fatal(err)
+	}
+	tmp, err := strconv.ParseInt(portStr, 10, 32)
+	if err != nil {
+		t.Fatal(err)
+	}
+	broker.port = int32(tmp)
+
+	go broker.serverLoop()
+
+	return broker
+}
+
+func (b *MockBroker) Returns(e encoder) {
+	b.expectations <- e
+}
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
new file mode 100644
index 0000000..a203142
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/mockresponses.go
@@ -0,0 +1,455 @@
+package sarama
+
+import (
+	"fmt"
+)
+
+// TestReporter has methods matching Go's testing.T to avoid importing
+// `testing` in the main part of the library.
+type TestReporter interface {
+	Error(...interface{})
+	Errorf(string, ...interface{})
+	Fatal(...interface{})
+	Fatalf(string, ...interface{})
+}
+
+// MockResponse is a response builder interface: it defines one method that
+// allows generating a response based on a request body. MockResponses are used
+// to program the behavior of MockBroker in tests.
+type MockResponse interface {
+	For(reqBody versionedDecoder) (res encoder)
+}
+
+// MockWrapper is a mock response builder that returns a particular concrete
+// response regardless of the actual request passed to the `For` method.
+type MockWrapper struct {
+	res encoder
+}
+
+func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
+	return mw.res
+}
+
+func NewMockWrapper(res encoder) *MockWrapper {
+	return &MockWrapper{res: res}
+}
+
+// MockSequence is a mock response builder that is created from a sequence of
+// concrete responses. Each time a `MockBroker` calls its `For` method, the
+// next response from the sequence is returned. When the end of the sequence
+// is reached, the last element from the sequence is returned.
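// Illustrative sketch (editor's addition, not part of the vendored source):
// how MockSequence is typically wired into a test. The TestReporter t, the
// broker ID, and the choice of MetadataResponse are assumptions; plain
// encoder values are wrapped in MockWrapper automatically.
//
//	seq := NewMockSequence(
//		new(MetadataResponse),      // served for the first MetadataRequest
//		NewMockMetadataResponse(t), // served for every request after that
//	)
//	mb := NewMockBroker(t, 1)
//	defer mb.Close()
//	mb.SetHandlerByMap(map[string]MockResponse{"MetadataRequest": seq})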
+type MockSequence struct { + responses []MockResponse +} + +func NewMockSequence(responses ...interface{}) *MockSequence { + ms := &MockSequence{} + ms.responses = make([]MockResponse, len(responses)) + for i, res := range responses { + switch res := res.(type) { + case MockResponse: + ms.responses[i] = res + case encoder: + ms.responses[i] = NewMockWrapper(res) + default: + panic(fmt.Sprintf("Unexpected response type: %T", res)) + } + } + return ms +} + +func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) { + res = mc.responses[0].For(reqBody) + if len(mc.responses) > 1 { + mc.responses = mc.responses[1:] + } + return res +} + +// MockMetadataResponse is a `MetadataResponse` builder. +type MockMetadataResponse struct { + leaders map[string]map[int32]int32 + brokers map[string]int32 + t TestReporter +} + +func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse { + return &MockMetadataResponse{ + leaders: make(map[string]map[int32]int32), + brokers: make(map[string]int32), + t: t, + } +} + +func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse { + partitions := mmr.leaders[topic] + if partitions == nil { + partitions = make(map[int32]int32) + mmr.leaders[topic] = partitions + } + partitions[partition] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse { + mmr.brokers[addr] = brokerID + return mmr +} + +func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder { + metadataRequest := reqBody.(*MetadataRequest) + metadataResponse := &MetadataResponse{} + for addr, brokerID := range mmr.brokers { + metadataResponse.AddBroker(addr, brokerID) + } + if len(metadataRequest.Topics) == 0 { + for topic, partitions := range mmr.leaders { + for partition, brokerID := range partitions { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse + } + for _, topic := range metadataRequest.Topics { + for partition, brokerID := range mmr.leaders[topic] { + metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError) + } + } + return metadataResponse +} + +// MockOffsetResponse is an `OffsetResponse` builder. 
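// Illustrative sketch (editor's addition, not part of the vendored source):
// chaining the MockMetadataResponse builder so a client pointed at the mock
// broker sees it as leader for one partition. The broker mb is carried over
// from the sketch above and the topic name is a placeholder.
//
//	mb.SetHandlerByMap(map[string]MockResponse{
//		"MetadataRequest": NewMockMetadataResponse(t).
//			SetBroker(mb.Addr(), mb.BrokerID()).
//			SetLeader("my_topic", 0, mb.BrokerID()),
//	})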
+type MockOffsetResponse struct { + offsets map[string]map[int32]map[int64]int64 + t TestReporter +} + +func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse { + return &MockOffsetResponse{ + offsets: make(map[string]map[int32]map[int64]int64), + t: t, + } +} + +func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse { + partitions := mor.offsets[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]int64) + mor.offsets[topic] = partitions + } + times := partitions[partition] + if times == nil { + times = make(map[int64]int64) + partitions[partition] = times + } + times[time] = offset + return mor +} + +func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder { + offsetRequest := reqBody.(*OffsetRequest) + offsetResponse := &OffsetResponse{} + for topic, partitions := range offsetRequest.blocks { + for partition, block := range partitions { + offset := mor.getOffset(topic, partition, block.time) + offsetResponse.AddTopicPartition(topic, partition, offset) + } + } + return offsetResponse +} + +func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 { + partitions := mor.offsets[topic] + if partitions == nil { + mor.t.Errorf("missing topic: %s", topic) + } + times := partitions[partition] + if times == nil { + mor.t.Errorf("missing partition: %d", partition) + } + offset, ok := times[time] + if !ok { + mor.t.Errorf("missing time: %d", time) + } + return offset +} + +// MockFetchResponse is a `FetchResponse` builder. +type MockFetchResponse struct { + messages map[string]map[int32]map[int64]Encoder + highWaterMarks map[string]map[int32]int64 + t TestReporter + batchSize int +} + +func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse { + return &MockFetchResponse{ + messages: make(map[string]map[int32]map[int64]Encoder), + highWaterMarks: make(map[string]map[int32]int64), + t: t, + batchSize: batchSize, + } +} + +func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse { + partitions := mfr.messages[topic] + if partitions == nil { + partitions = make(map[int32]map[int64]Encoder) + mfr.messages[topic] = partitions + } + messages := partitions[partition] + if messages == nil { + messages = make(map[int64]Encoder) + partitions[partition] = messages + } + messages[offset] = msg + return mfr +} + +func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + partitions = make(map[int32]int64) + mfr.highWaterMarks[topic] = partitions + } + partitions[partition] = offset + return mfr +} + +func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder { + fetchRequest := reqBody.(*FetchRequest) + res := &FetchResponse{} + for topic, partitions := range fetchRequest.blocks { + for partition, block := range partitions { + initialOffset := block.fetchOffset + offset := initialOffset + maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition)) + for i := 0; i < mfr.batchSize && offset < maxOffset; { + msg := mfr.getMessage(topic, partition, offset) + if msg != nil { + res.AddMessage(topic, partition, nil, msg, offset) + i++ + } + offset++ + } + fb := res.GetBlock(topic, partition) + if fb == nil { + res.AddError(topic, partition, ErrNoError) + fb = res.GetBlock(topic, partition) + } + fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition) + } + } + 
return res +} + +func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder { + partitions := mfr.messages[topic] + if partitions == nil { + return nil + } + messages := partitions[partition] + if messages == nil { + return nil + } + return messages[offset] +} + +func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int { + partitions := mfr.messages[topic] + if partitions == nil { + return 0 + } + messages := partitions[partition] + if messages == nil { + return 0 + } + return len(messages) +} + +func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 { + partitions := mfr.highWaterMarks[topic] + if partitions == nil { + return 0 + } + return partitions[partition] +} + +// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder. +type MockConsumerMetadataResponse struct { + coordinators map[string]interface{} + t TestReporter +} + +func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse { + return &MockConsumerMetadataResponse{ + coordinators: make(map[string]interface{}), + t: t, + } +} + +func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse { + mr.coordinators[group] = broker + return mr +} + +func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse { + mr.coordinators[group] = kerror + return mr +} + +func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*ConsumerMetadataRequest) + group := req.ConsumerGroup + res := &ConsumerMetadataResponse{} + v := mr.coordinators[group] + switch v := v.(type) { + case *MockBroker: + res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()} + case KError: + res.Err = v + } + return res +} + +// MockOffsetCommitResponse is a `OffsetCommitResponse` builder. +type MockOffsetCommitResponse struct { + errors map[string]map[string]map[int32]KError + t TestReporter +} + +func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse { + return &MockOffsetCommitResponse{t: t} +} + +func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[string]map[int32]KError) + } + topics := mr.errors[group] + if topics == nil { + topics = make(map[string]map[int32]KError) + mr.errors[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]KError) + topics[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*OffsetCommitRequest) + group := req.ConsumerGroup + res := &OffsetCommitResponse{} + for topic, partitions := range req.blocks { + for partition := range partitions { + res.AddError(topic, partition, mr.getError(group, topic, partition)) + } + } + return res +} + +func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError { + topics := mr.errors[group] + if topics == nil { + return ErrNoError + } + partitions := topics[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockProduceResponse is a `ProduceResponse` builder. 
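// Illustrative sketch (editor's addition, not part of the vendored source):
// the usual trio of mock responses for exercising a consumer end to end:
// metadata, then initial offsets, then one fetched message. The topic and
// payload are placeholders; OffsetOldest, OffsetNewest and StringEncoder are
// defined elsewhere in this package.
//
//	mb.SetHandlerByMap(map[string]MockResponse{
//		"MetadataRequest": NewMockMetadataResponse(t).
//			SetBroker(mb.Addr(), mb.BrokerID()).
//			SetLeader("my_topic", 0, mb.BrokerID()),
//		"OffsetRequest": NewMockOffsetResponse(t).
//			SetOffset("my_topic", 0, OffsetOldest, 0).
//			SetOffset("my_topic", 0, OffsetNewest, 1),
//		"FetchRequest": NewMockFetchResponse(t, 1).
//			SetMessage("my_topic", 0, 0, StringEncoder("hello")),
//	})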
+type MockProduceResponse struct { + errors map[string]map[int32]KError + t TestReporter +} + +func NewMockProduceResponse(t TestReporter) *MockProduceResponse { + return &MockProduceResponse{t: t} +} + +func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse { + if mr.errors == nil { + mr.errors = make(map[string]map[int32]KError) + } + partitions := mr.errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + mr.errors[topic] = partitions + } + partitions[partition] = kerror + return mr +} + +func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*ProduceRequest) + res := &ProduceResponse{} + for topic, partitions := range req.msgSets { + for partition := range partitions { + res.AddTopicPartition(topic, partition, mr.getError(topic, partition)) + } + } + return res +} + +func (mr *MockProduceResponse) getError(topic string, partition int32) KError { + partitions := mr.errors[topic] + if partitions == nil { + return ErrNoError + } + kerror, ok := partitions[partition] + if !ok { + return ErrNoError + } + return kerror +} + +// MockOffsetFetchResponse is a `OffsetFetchResponse` builder. +type MockOffsetFetchResponse struct { + offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock + t TestReporter +} + +func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse { + return &MockOffsetFetchResponse{t: t} +} + +func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse { + if mr.offsets == nil { + mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock) + } + topics := mr.offsets[group] + if topics == nil { + topics = make(map[string]map[int32]*OffsetFetchResponseBlock) + mr.offsets[group] = topics + } + partitions := topics[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + topics[topic] = partitions + } + partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror} + return mr +} + +func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder { + req := reqBody.(*OffsetFetchRequest) + group := req.ConsumerGroup + res := &OffsetFetchResponse{} + for topic, partitions := range mr.offsets[group] { + for partition, block := range partitions { + res.AddBlock(topic, partition, block) + } + } + return res +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go new file mode 100644 index 0000000..b21ea63 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_request.go @@ -0,0 +1,190 @@ +package sarama + +// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which +// tells the broker to set the timestamp to the time at which the request was received. +// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2. +const ReceiveTime int64 = -1 + +// GroupGenerationUndefined is a special value for the group generation field of +// Offset Commit Requests that should be used when a consumer group does not rely +// on Kafka for partition management. 
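// Illustrative sketch (editor's addition, not part of the vendored source):
// hand-building a v1 commit request for a consumer that manages partitions
// itself; the group, topic, and offset are placeholders.
//
//	req := new(OffsetCommitRequest)
//	req.Version = 1
//	req.ConsumerGroup = "my-group"
//	req.ConsumerGroupGeneration = GroupGenerationUndefined // no Kafka-managed group
//	req.AddBlock("my_topic", 0, 42, ReceiveTime, "")       // broker stamps the commit time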
+const GroupGenerationUndefined = -1 + +type offsetCommitRequestBlock struct { + offset int64 + timestamp int64 + metadata string +} + +func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(b.offset) + if version == 1 { + pe.putInt64(b.timestamp) + } else if b.timestamp != 0 { + Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored") + } + + return pe.putString(b.metadata) +} + +func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.offset, err = pd.getInt64(); err != nil { + return err + } + if version == 1 { + if b.timestamp, err = pd.getInt64(); err != nil { + return err + } + } + b.metadata, err = pd.getString() + return err +} + +type OffsetCommitRequest struct { + ConsumerGroup string + ConsumerGroupGeneration int32 // v1 or later + ConsumerID string // v1 or later + RetentionTime int64 // v2 or later + + // Version can be: + // - 0 (kafka 0.8.1 and later) + // - 1 (kafka 0.8.2 and later) + // - 2 (kafka 0.9.0 and later) + Version int16 + blocks map[string]map[int32]*offsetCommitRequestBlock +} + +func (r *OffsetCommitRequest) encode(pe packetEncoder) error { + if r.Version < 0 || r.Version > 2 { + return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"} + } + + if err := pe.putString(r.ConsumerGroup); err != nil { + return err + } + + if r.Version >= 1 { + pe.putInt32(r.ConsumerGroupGeneration) + if err := pe.putString(r.ConsumerID); err != nil { + return err + } + } else { + if r.ConsumerGroupGeneration != 0 { + Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored") + } + if r.ConsumerID != "" { + Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored") + } + } + + if r.Version >= 2 { + pe.putInt64(r.RetentionTime) + } else if r.RetentionTime != 0 { + Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored") + } + + if err := pe.putArrayLength(len(r.blocks)); err != nil { + return err + } + for topic, partitions := range r.blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + + if r.Version >= 1 { + if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil { + return err + } + if r.ConsumerID, err = pd.getString(); err != nil { + return err + } + } + + if r.Version >= 2 { + if r.RetentionTime, err = pd.getInt64(); err != nil { + return err + } + } + + topicCount, err := pd.getArrayLength() + if err != nil { + return err + } + if topicCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + for i := 0; i < topicCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetCommitRequestBlock{} + if err := block.decode(pd, 
r.Version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetCommitRequest) key() int16 { + return 8 +} + +func (r *OffsetCommitRequest) version() int16 { + return r.Version +} + +func (r *OffsetCommitRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + case 2: + return V0_9_0_0 + default: + return minVersion + } +} + +func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock) + } + + r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata} +} diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go new file mode 100644 index 0000000..7f277e7 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_commit_response.go @@ -0,0 +1,85 @@ +package sarama + +type OffsetCommitResponse struct { + Errors map[string]map[int32]KError +} + +func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) { + if r.Errors == nil { + r.Errors = make(map[string]map[int32]KError) + } + partitions := r.Errors[topic] + if partitions == nil { + partitions = make(map[int32]KError) + r.Errors[topic] = partitions + } + partitions[partition] = kerror +} + +func (r *OffsetCommitResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Errors)); err != nil { + return err + } + for topic, partitions := range r.Errors { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, kerror := range partitions { + pe.putInt32(partition) + pe.putInt16(int16(kerror)) + } + } + return nil +} + +func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Errors = make(map[string]map[int32]KError, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numErrors, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Errors[name] = make(map[int32]KError, numErrors) + + for j := 0; j < numErrors; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + r.Errors[name][id] = KError(tmp) + } + } + + return nil +} + +func (r *OffsetCommitResponse) key() int16 { + return 8 +} + +func (r *OffsetCommitResponse) version() int16 { + return 0 +} + +func (r *OffsetCommitResponse) requiredVersion() KafkaVersion { + return minVersion +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go new file mode 100644 index 0000000..b19fe79 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_request.go @@ -0,0 +1,81 @@ +package sarama + +type OffsetFetchRequest struct { + ConsumerGroup string + Version int16 + partitions map[string][]int32 +} + +func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) { + if r.Version < 0 || r.Version > 1 { + return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"} + } + + if err = pe.putString(r.ConsumerGroup); err != nil { + return 
err + } + if err = pe.putArrayLength(len(r.partitions)); err != nil { + return err + } + for topic, partitions := range r.partitions { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putInt32Array(partitions); err != nil { + return err + } + } + return nil +} + +func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + if r.ConsumerGroup, err = pd.getString(); err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + if partitionCount == 0 { + return nil + } + r.partitions = make(map[string][]int32) + for i := 0; i < partitionCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitions, err := pd.getInt32Array() + if err != nil { + return err + } + r.partitions[topic] = partitions + } + return nil +} + +func (r *OffsetFetchRequest) key() int16 { + return 9 +} + +func (r *OffsetFetchRequest) version() int16 { + return r.Version +} + +func (r *OffsetFetchRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_8_2_0 + default: + return minVersion + } +} + +func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) { + if r.partitions == nil { + r.partitions = make(map[string][]int32) + } + + r.partitions[topic] = append(r.partitions[topic], partitionID) +} diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go new file mode 100644 index 0000000..323220e --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_fetch_response.go @@ -0,0 +1,143 @@ +package sarama + +type OffsetFetchResponseBlock struct { + Offset int64 + Metadata string + Err KError +} + +func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) { + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + b.Metadata, err = pd.getString() + if err != nil { + return err + } + + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + return nil +} + +func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) { + pe.putInt64(b.Offset) + + err = pe.putString(b.Metadata) + if err != nil { + return err + } + + pe.putInt16(int16(b.Err)) + + return nil +} + +type OffsetFetchResponse struct { + Blocks map[string]map[int32]*OffsetFetchResponseBlock +} + +func (r *OffsetFetchResponse) encode(pe packetEncoder) error { + if err := pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + for topic, partitions := range r.Blocks { + if err := pe.putString(topic); err != nil { + return err + } + if err := pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err := block.encode(pe); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil || numTopics == 0 { + return err + } + + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + if numBlocks == 0 { + r.Blocks[name] = nil + continue + } + r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block 
:= new(OffsetFetchResponseBlock) + err = block.decode(pd) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetFetchResponse) key() int16 { + return 9 +} + +func (r *OffsetFetchResponse) version() int16 { + return 0 +} + +func (r *OffsetFetchResponse) requiredVersion() KafkaVersion { + return minVersion +} + +func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock) + } + partitions := r.Blocks[topic] + if partitions == nil { + partitions = make(map[int32]*OffsetFetchResponseBlock) + r.Blocks[topic] = partitions + } + partitions[partition] = block +} diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go new file mode 100644 index 0000000..5e15cda --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_manager.go @@ -0,0 +1,542 @@ +package sarama + +import ( + "sync" + "time" +) + +// Offset Manager + +// OffsetManager uses Kafka to store and fetch consumed partition offsets. +type OffsetManager interface { + // ManagePartition creates a PartitionOffsetManager on the given topic/partition. + // It will return an error if this OffsetManager is already managing the given + // topic/partition. + ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) + + // Close stops the OffsetManager from managing offsets. It is required to call + // this function before an OffsetManager object passes out of scope, as it + // will otherwise leak memory. You must call this after all the + // PartitionOffsetManagers are closed. + Close() error +} + +type offsetManager struct { + client Client + conf *Config + group string + + lock sync.Mutex + poms map[string]map[int32]*partitionOffsetManager + boms map[*Broker]*brokerOffsetManager +} + +// NewOffsetManagerFromClient creates a new OffsetManager from the given client. +// It is still necessary to call Close() on the underlying client when finished with the partition manager. 
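// Illustrative sketch (editor's addition, not part of the vendored source):
// the intended OffsetManager lifecycle, assuming an existing Client and
// eliding error handling.
//
//	om, _ := NewOffsetManagerFromClient("my-group", client)
//	pom, _ := om.ManagePartition("my_topic", 0)
//	offset, metadata := pom.NextOffset() // where the consumer should resume
//	// ... consume a message at `offset` ...
//	pom.MarkOffset(offset+1, metadata) // mark the next offset to read, not the last one read
//	_ = pom.Close()
//	_ = om.Close()
//	_ = client.Close()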
+func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) { + // Check that we are not dealing with a closed Client before processing any other arguments + if client.Closed() { + return nil, ErrClosedClient + } + + om := &offsetManager{ + client: client, + conf: client.Config(), + group: group, + poms: make(map[string]map[int32]*partitionOffsetManager), + boms: make(map[*Broker]*brokerOffsetManager), + } + + return om, nil +} + +func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) { + pom, err := om.newPartitionOffsetManager(topic, partition) + if err != nil { + return nil, err + } + + om.lock.Lock() + defer om.lock.Unlock() + + topicManagers := om.poms[topic] + if topicManagers == nil { + topicManagers = make(map[int32]*partitionOffsetManager) + om.poms[topic] = topicManagers + } + + if topicManagers[partition] != nil { + return nil, ConfigurationError("That topic/partition is already being managed") + } + + topicManagers[partition] = pom + return pom, nil +} + +func (om *offsetManager) Close() error { + return nil +} + +func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager { + om.lock.Lock() + defer om.lock.Unlock() + + bom := om.boms[broker] + if bom == nil { + bom = om.newBrokerOffsetManager(broker) + om.boms[broker] = bom + } + + bom.refs++ + + return bom +} + +func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) { + om.lock.Lock() + defer om.lock.Unlock() + + bom.refs-- + + if bom.refs == 0 { + close(bom.updateSubscriptions) + if om.boms[bom.broker] == bom { + delete(om.boms, bom.broker) + } + } +} + +func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) { + om.lock.Lock() + defer om.lock.Unlock() + + delete(om.boms, bom.broker) +} + +func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) { + om.lock.Lock() + defer om.lock.Unlock() + + delete(om.poms[pom.topic], pom.partition) + if len(om.poms[pom.topic]) == 0 { + delete(om.poms, pom.topic) + } +} + +// Partition Offset Manager + +// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close() +// on a partition offset manager to avoid leaks, it will not be garbage-collected automatically when it passes +// out of scope. +type PartitionOffsetManager interface { + // NextOffset returns the next offset that should be consumed for the managed + // partition, accompanied by metadata which can be used to reconstruct the state + // of the partition consumer when it resumes. NextOffset() will return + // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset + // was committed for this partition yet. + NextOffset() (int64, string) + + // MarkOffset marks the provided offset, alongside a metadata string + // that represents the state of the partition consumer at that point in time. The + // metadata string can be used by another consumer to restore that state, so it + // can resume consumption. + // + // To follow upstream conventions, you are expected to mark the offset of the + // next message to read, not the last message read. Thus, when calling `MarkOffset` + // you should typically add one to the offset of the last consumed message. + // + // Note: calling MarkOffset does not necessarily commit the offset to the backend + // store immediately for efficiency reasons, and it may never be committed if + // your application crashes. 
This means that you may end up processing the same + // message twice, and your processing should ideally be idempotent. + MarkOffset(offset int64, metadata string) + + // Errors returns a read channel of errors that occur during offset management, if + // enabled. By default, errors are logged and not returned over this channel. If + // you want to implement any custom error handling, set your config's + // Consumer.Return.Errors setting to true, and read from this channel. + Errors() <-chan *ConsumerError + + // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will + // return immediately, after which you should wait until the 'errors' channel has + // been drained and closed. It is required to call this function, or Close before + // a consumer object passes out of scope, as it will otherwise leak memory. You + // must call this before calling Close on the underlying client. + AsyncClose() + + // Close stops the PartitionOffsetManager from managing offsets. It is required to + // call this function (or AsyncClose) before a PartitionOffsetManager object + // passes out of scope, as it will otherwise leak memory. You must call this + // before calling Close on the underlying client. + Close() error +} + +type partitionOffsetManager struct { + parent *offsetManager + topic string + partition int32 + + lock sync.Mutex + offset int64 + metadata string + dirty bool + clean sync.Cond + broker *brokerOffsetManager + + errors chan *ConsumerError + rebalance chan none + dying chan none +} + +func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) { + pom := &partitionOffsetManager{ + parent: om, + topic: topic, + partition: partition, + errors: make(chan *ConsumerError, om.conf.ChannelBufferSize), + rebalance: make(chan none, 1), + dying: make(chan none), + } + pom.clean.L = &pom.lock + + if err := pom.selectBroker(); err != nil { + return nil, err + } + + if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil { + return nil, err + } + + pom.broker.updateSubscriptions <- pom + + go withRecover(pom.mainLoop) + + return pom, nil +} + +func (pom *partitionOffsetManager) mainLoop() { + for { + select { + case <-pom.rebalance: + if err := pom.selectBroker(); err != nil { + pom.handleError(err) + pom.rebalance <- none{} + } else { + pom.broker.updateSubscriptions <- pom + } + case <-pom.dying: + if pom.broker != nil { + select { + case <-pom.rebalance: + case pom.broker.updateSubscriptions <- pom: + } + pom.parent.unrefBrokerOffsetManager(pom.broker) + } + pom.parent.abandonPartitionOffsetManager(pom) + close(pom.errors) + return + } + } +} + +func (pom *partitionOffsetManager) selectBroker() error { + if pom.broker != nil { + pom.parent.unrefBrokerOffsetManager(pom.broker) + pom.broker = nil + } + + var broker *Broker + var err error + + if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil { + return err + } + + if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil { + return err + } + + pom.broker = pom.parent.refBrokerOffsetManager(broker) + return nil +} + +func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error { + request := new(OffsetFetchRequest) + request.Version = 1 + request.ConsumerGroup = pom.parent.group + request.AddPartition(pom.topic, pom.partition) + + response, err := pom.broker.broker.FetchOffset(request) + if err != nil { + return err + } + + block := response.GetBlock(pom.topic, pom.partition) + if block == nil { + 
return ErrIncompleteResponse + } + + switch block.Err { + case ErrNoError: + pom.offset = block.Offset + pom.metadata = block.Metadata + return nil + case ErrNotCoordinatorForConsumer: + if retries <= 0 { + return block.Err + } + if err := pom.selectBroker(); err != nil { + return err + } + return pom.fetchInitialOffset(retries - 1) + case ErrOffsetsLoadInProgress: + if retries <= 0 { + return block.Err + } + time.Sleep(pom.parent.conf.Metadata.Retry.Backoff) + return pom.fetchInitialOffset(retries - 1) + default: + return block.Err + } +} + +func (pom *partitionOffsetManager) handleError(err error) { + cErr := &ConsumerError{ + Topic: pom.topic, + Partition: pom.partition, + Err: err, + } + + if pom.parent.conf.Consumer.Return.Errors { + pom.errors <- cErr + } else { + Logger.Println(cErr) + } +} + +func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError { + return pom.errors +} + +func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if offset > pom.offset { + pom.offset = offset + pom.metadata = metadata + pom.dirty = true + } +} + +func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset == offset && pom.metadata == metadata { + pom.dirty = false + pom.clean.Signal() + } +} + +func (pom *partitionOffsetManager) NextOffset() (int64, string) { + pom.lock.Lock() + defer pom.lock.Unlock() + + if pom.offset >= 0 { + return pom.offset, pom.metadata + } + + return pom.parent.conf.Consumer.Offsets.Initial, "" +} + +func (pom *partitionOffsetManager) AsyncClose() { + go func() { + pom.lock.Lock() + defer pom.lock.Unlock() + + for pom.dirty { + pom.clean.Wait() + } + + close(pom.dying) + }() +} + +func (pom *partitionOffsetManager) Close() error { + pom.AsyncClose() + + var errors ConsumerErrors + for err := range pom.errors { + errors = append(errors, err) + } + + if len(errors) > 0 { + return errors + } + return nil +} + +// Broker Offset Manager + +type brokerOffsetManager struct { + parent *offsetManager + broker *Broker + timer *time.Ticker + updateSubscriptions chan *partitionOffsetManager + subscriptions map[*partitionOffsetManager]none + refs int +} + +func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager { + bom := &brokerOffsetManager{ + parent: om, + broker: broker, + timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval), + updateSubscriptions: make(chan *partitionOffsetManager), + subscriptions: make(map[*partitionOffsetManager]none), + } + + go withRecover(bom.mainLoop) + + return bom +} + +func (bom *brokerOffsetManager) mainLoop() { + for { + select { + case <-bom.timer.C: + if len(bom.subscriptions) > 0 { + bom.flushToBroker() + } + case s, ok := <-bom.updateSubscriptions: + if !ok { + bom.timer.Stop() + return + } + if _, ok := bom.subscriptions[s]; ok { + delete(bom.subscriptions, s) + } else { + bom.subscriptions[s] = none{} + } + } + } +} + +func (bom *brokerOffsetManager) flushToBroker() { + request := bom.constructRequest() + if request == nil { + return + } + + response, err := bom.broker.CommitOffset(request) + + if err != nil { + bom.abort(err) + return + } + + for s := range bom.subscriptions { + if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil { + continue + } + + var err KError + var ok bool + + if response.Errors[s.topic] == nil { + s.handleError(ErrIncompleteResponse) + delete(bom.subscriptions, s) + s.rebalance <- none{} + 
continue + } + if err, ok = response.Errors[s.topic][s.partition]; !ok { + s.handleError(ErrIncompleteResponse) + delete(bom.subscriptions, s) + s.rebalance <- none{} + continue + } + + switch err { + case ErrNoError: + block := request.blocks[s.topic][s.partition] + s.updateCommitted(block.offset, block.metadata) + case ErrNotLeaderForPartition, ErrLeaderNotAvailable, + ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer: + // not a critical error, we just need to redispatch + delete(bom.subscriptions, s) + s.rebalance <- none{} + case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize: + // nothing we can do about this, just tell the user and carry on + s.handleError(err) + case ErrOffsetsLoadInProgress: + // nothing wrong but we didn't commit, we'll get it next time round + break + case ErrUnknownTopicOrPartition: + // let the user know *and* try redispatching - if topic-auto-create is + // enabled, redispatching should trigger a metadata request and create the + // topic; if not then re-dispatching won't help, but we've let the user + // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706) + fallthrough + default: + // dunno, tell the user and try redispatching + s.handleError(err) + delete(bom.subscriptions, s) + s.rebalance <- none{} + } + } +} + +func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest { + var r *OffsetCommitRequest + var perPartitionTimestamp int64 + if bom.parent.conf.Consumer.Offsets.Retention == 0 { + perPartitionTimestamp = ReceiveTime + r = &OffsetCommitRequest{ + Version: 1, + ConsumerGroup: bom.parent.group, + ConsumerGroupGeneration: GroupGenerationUndefined, + } + } else { + r = &OffsetCommitRequest{ + Version: 2, + RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond), + ConsumerGroup: bom.parent.group, + ConsumerGroupGeneration: GroupGenerationUndefined, + } + + } + + for s := range bom.subscriptions { + s.lock.Lock() + if s.dirty { + r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata) + } + s.lock.Unlock() + } + + if len(r.blocks) > 0 { + return r + } + + return nil +} + +func (bom *brokerOffsetManager) abort(err error) { + _ = bom.broker.Close() // we don't care about the error this might return, we already have one + bom.parent.abandonBroker(bom) + + for pom := range bom.subscriptions { + pom.handleError(err) + pom.rebalance <- none{} + } + + for s := range bom.updateSubscriptions { + if _, ok := bom.subscriptions[s]; !ok { + s.handleError(err) + s.rebalance <- none{} + } + } + + bom.subscriptions = make(map[*partitionOffsetManager]none) +} diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go new file mode 100644 index 0000000..6c26960 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_request.go @@ -0,0 +1,132 @@ +package sarama + +type offsetRequestBlock struct { + time int64 + maxOffsets int32 // Only used in version 0 +} + +func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error { + pe.putInt64(int64(b.time)) + if version == 0 { + pe.putInt32(b.maxOffsets) + } + + return nil +} + +func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) { + if b.time, err = pd.getInt64(); err != nil { + return err + } + if version == 0 { + if b.maxOffsets, err = pd.getInt32(); err != nil { + return err + } + } + return nil +} + +type OffsetRequest struct { + Version int16 + blocks map[string]map[int32]*offsetRequestBlock +} + +func 
(r *OffsetRequest) encode(pe packetEncoder) error { + pe.putInt32(-1) // replica ID is always -1 for clients + err := pe.putArrayLength(len(r.blocks)) + if err != nil { + return err + } + for topic, partitions := range r.blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, r.Version); err != nil { + return err + } + } + } + return nil +} + +func (r *OffsetRequest) decode(pd packetDecoder, version int16) error { + r.Version = version + + // Ignore replica ID + if _, err := pd.getInt32(); err != nil { + return err + } + blockCount, err := pd.getArrayLength() + if err != nil { + return err + } + if blockCount == 0 { + return nil + } + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + for i := 0; i < blockCount; i++ { + topic, err := pd.getString() + if err != nil { + return err + } + partitionCount, err := pd.getArrayLength() + if err != nil { + return err + } + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + for j := 0; j < partitionCount; j++ { + partition, err := pd.getInt32() + if err != nil { + return err + } + block := &offsetRequestBlock{} + if err := block.decode(pd, version); err != nil { + return err + } + r.blocks[topic][partition] = block + } + } + return nil +} + +func (r *OffsetRequest) key() int16 { + return 2 +} + +func (r *OffsetRequest) version() int16 { + return r.Version +} + +func (r *OffsetRequest) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return minVersion + } +} + +func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) { + if r.blocks == nil { + r.blocks = make(map[string]map[int32]*offsetRequestBlock) + } + + if r.blocks[topic] == nil { + r.blocks[topic] = make(map[int32]*offsetRequestBlock) + } + + tmp := new(offsetRequestBlock) + tmp.time = time + if r.Version == 0 { + tmp.maxOffsets = maxOffsets + } + + r.blocks[topic][partitionID] = tmp +} diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go new file mode 100644 index 0000000..9a9cfe9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/offset_response.go @@ -0,0 +1,174 @@ +package sarama + +type OffsetResponseBlock struct { + Err KError + Offsets []int64 // Version 0 + Offset int64 // Version 1 + Timestamp int64 // Version 1 +} + +func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + if version == 0 { + b.Offsets, err = pd.getInt64Array() + + return err + } + + b.Timestamp, err = pd.getInt64() + if err != nil { + return err + } + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + // For backwards compatibility put the offset in the offsets array too + b.Offsets = []int64{b.Offset} + + return nil +} + +func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) { + pe.putInt16(int16(b.Err)) + + if version == 0 { + return pe.putInt64Array(b.Offsets) + } + + pe.putInt64(b.Timestamp) + pe.putInt64(b.Offset) + + return nil +} + +type OffsetResponse struct { + Version int16 + Blocks map[string]map[int32]*OffsetResponseBlock +} + +func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) { + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks 
= make(map[string]map[int32]*OffsetResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(OffsetResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + return nil +} + +func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +/* +// [0 0 0 1 ntopics +0 8 109 121 95 116 111 112 105 99 topic +0 0 0 1 npartitions +0 0 0 0 id +0 0 + +0 0 0 1 0 0 0 0 +0 1 1 1 0 0 0 1 +0 8 109 121 95 116 111 112 +105 99 0 0 0 1 0 0 +0 0 0 0 0 0 0 1 +0 0 0 0 0 1 1 1] + +*/ +func (r *OffsetResponse) encode(pe packetEncoder) (err error) { + if err = pe.putArrayLength(len(r.Blocks)); err != nil { + return err + } + + for topic, partitions := range r.Blocks { + if err = pe.putString(topic); err != nil { + return err + } + if err = pe.putArrayLength(len(partitions)); err != nil { + return err + } + for partition, block := range partitions { + pe.putInt32(partition) + if err = block.encode(pe, r.version()); err != nil { + return err + } + } + } + + return nil +} + +func (r *OffsetResponse) key() int16 { + return 2 +} + +func (r *OffsetResponse) version() int16 { + return r.Version +} + +func (r *OffsetResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_10_1_0 + default: + return minVersion + } +} + +// testing API + +func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*OffsetResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*OffsetResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset} +} diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go new file mode 100644 index 0000000..28670c0 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/packet_decoder.go @@ -0,0 +1,45 @@ +package sarama + +// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules. +// Types implementing Decoder only need to worry about calling methods like GetString, +// not about how a string is represented in Kafka. +type packetDecoder interface { + // Primitives + getInt8() (int8, error) + getInt16() (int16, error) + getInt32() (int32, error) + getInt64() (int64, error) + getArrayLength() (int, error) + + // Collections + getBytes() ([]byte, error) + getString() (string, error) + getInt32Array() ([]int32, error) + getInt64Array() ([]int64, error) + getStringArray() ([]string, error) + + // Subsets + remaining() int + getSubset(length int) (packetDecoder, error) + + // Stacks, see PushDecoder + push(in pushDecoder) error + pop() error +} + +// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity +// of the field depends on what is after it in the packet. 
Start them with PacketDecoder.Push() where +// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they +// depend upon have been decoded. +type pushDecoder interface { + // Saves the offset into the input buffer as the location to actually read the calculated value when able. + saveOffset(in int) + + // Returns the length of data to reserve for the input of this encoder (eg 4 bytes for a CRC32). + reserveLength() int + + // Indicates that all required data is now available to calculate and check the field. + // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes + // of data from the saved offset, and verify it based on the data between the saved offset and curOffset. + check(curOffset int, buf []byte) error +} diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go new file mode 100644 index 0000000..27a10f6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/packet_encoder.go @@ -0,0 +1,50 @@ +package sarama + +import "github.com/rcrowley/go-metrics" + +// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules. +// Types implementing Encoder only need to worry about calling methods like PutString, +// not about how a string is represented in Kafka. +type packetEncoder interface { + // Primitives + putInt8(in int8) + putInt16(in int16) + putInt32(in int32) + putInt64(in int64) + putArrayLength(in int) error + + // Collections + putBytes(in []byte) error + putRawBytes(in []byte) error + putString(in string) error + putStringArray(in []string) error + putInt32Array(in []int32) error + putInt64Array(in []int64) error + + // Provide the current offset to record the batch size metric + offset() int + + // Stacks, see PushEncoder + push(in pushEncoder) + pop() error + + // To record metrics when provided + metricRegistry() metrics.Registry +} + +// PushEncoder is the interface for encoding fields like CRCs and lengths where the value +// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where +// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they +// depend upon have been written. +type pushEncoder interface { + // Saves the offset into the input buffer as the location to actually write the calculated value when able. + saveOffset(in int) + + // Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32). + reserveLength() int + + // Indicates that all required data is now available to calculate and write the field. + // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes + // of data to the saved offset, based on the data between the saved offset and curOffset. + run(curOffset int, buf []byte) error +} diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go new file mode 100644 index 0000000..9729327 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/partitioner.go @@ -0,0 +1,135 @@ +package sarama + +import ( + "hash" + "hash/fnv" + "math/rand" + "time" +) + +// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1], +// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided +// as simple default implementations. 
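// Illustrative sketch (editor's addition, not part of the vendored source):
// a partitioner is plugged into the producer configuration as a constructor
// rather than an instance, so every topic gets its own partitioner. Config
// and NewConfig are defined elsewhere in this package; choosing round-robin
// here is only an example.
//
//	cfg := NewConfig()
//	cfg.Producer.Partitioner = NewRoundRobinPartitioner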
+type Partitioner interface { + // Partition takes a message and partition count and chooses a partition + Partition(message *ProducerMessage, numPartitions int32) (int32, error) + + // RequiresConsistency indicates to the user of the partitioner whether the + // mapping of key->partition is consistent or not. Specifically, if a + // partitioner requires consistency then it must be allowed to choose from all + // partitions (even ones known to be unavailable), and its choice must be + // respected by the caller. The obvious example is the HashPartitioner. + RequiresConsistency() bool +} + +// PartitionerConstructor is the type for a function capable of constructing new Partitioners. +type PartitionerConstructor func(topic string) Partitioner + +type manualPartitioner struct{} + +// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided +// ProducerMessage's Partition field as the partition to produce to. +func NewManualPartitioner(topic string) Partitioner { + return new(manualPartitioner) +} + +func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return message.Partition, nil +} + +func (p *manualPartitioner) RequiresConsistency() bool { + return true +} + +type randomPartitioner struct { + generator *rand.Rand +} + +// NewRandomPartitioner returns a Partitioner which chooses a random partition each time. +func NewRandomPartitioner(topic string) Partitioner { + p := new(randomPartitioner) + p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano())) + return p +} + +func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + return int32(p.generator.Intn(int(numPartitions))), nil +} + +func (p *randomPartitioner) RequiresConsistency() bool { + return false +} + +type roundRobinPartitioner struct { + partition int32 +} + +// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time. +func NewRoundRobinPartitioner(topic string) Partitioner { + return &roundRobinPartitioner{} +} + +func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if p.partition >= numPartitions { + p.partition = 0 + } + ret := p.partition + p.partition++ + return ret, nil +} + +func (p *roundRobinPartitioner) RequiresConsistency() bool { + return false +} + +type hashPartitioner struct { + random Partitioner + hasher hash.Hash32 +} + +// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of custom hasher. +// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that +// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance. +func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor { + return func(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = hasher() + return p + } +} + +// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a +// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used, +// modulus the number of partitions. This ensures that messages with the same key always end up on the +// same partition. 
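// Illustrative sketch (editor's addition, not part of the vendored source):
// swapping the default FNV-1a hash for CRC-32 via NewCustomHashPartitioner,
// reusing cfg from the sketch above; hash and hash/crc32 are standard-library
// imports.
//
//	cfg.Producer.Partitioner = NewCustomHashPartitioner(func() hash.Hash32 {
//		return crc32.NewIEEE()
//	})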
+func NewHashPartitioner(topic string) Partitioner { + p := new(hashPartitioner) + p.random = NewRandomPartitioner(topic) + p.hasher = fnv.New32a() + return p +} + +func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) { + if message.Key == nil { + return p.random.Partition(message, numPartitions) + } + bytes, err := message.Key.Encode() + if err != nil { + return -1, err + } + p.hasher.Reset() + _, err = p.hasher.Write(bytes) + if err != nil { + return -1, err + } + partition := int32(p.hasher.Sum32()) % numPartitions + if partition < 0 { + partition = -partition + } + return partition, nil +} + +func (p *hashPartitioner) RequiresConsistency() bool { + return true +} diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go new file mode 100644 index 0000000..fd5ea0f --- /dev/null +++ b/vendor/github.com/Shopify/sarama/prep_encoder.go @@ -0,0 +1,121 @@ +package sarama + +import ( + "fmt" + "math" + + "github.com/rcrowley/go-metrics" +) + +type prepEncoder struct { + length int +} + +// primitives + +func (pe *prepEncoder) putInt8(in int8) { + pe.length++ +} + +func (pe *prepEncoder) putInt16(in int16) { + pe.length += 2 +} + +func (pe *prepEncoder) putInt32(in int32) { + pe.length += 4 +} + +func (pe *prepEncoder) putInt64(in int64) { + pe.length += 8 +} + +func (pe *prepEncoder) putArrayLength(in int) error { + if in > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)} + } + pe.length += 4 + return nil +} + +// arrays + +func (pe *prepEncoder) putBytes(in []byte) error { + pe.length += 4 + if in == nil { + return nil + } + if len(in) > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putRawBytes(in []byte) error { + if len(in) > math.MaxInt32 { + return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putString(in string) error { + pe.length += 2 + if len(in) > math.MaxInt16 { + return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))} + } + pe.length += len(in) + return nil +} + +func (pe *prepEncoder) putStringArray(in []string) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, str := range in { + if err := pe.putString(str); err != nil { + return err + } + } + + return nil +} + +func (pe *prepEncoder) putInt32Array(in []int32) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 4 * len(in) + return nil +} + +func (pe *prepEncoder) putInt64Array(in []int64) error { + err := pe.putArrayLength(len(in)) + if err != nil { + return err + } + pe.length += 8 * len(in) + return nil +} + +func (pe *prepEncoder) offset() int { + return pe.length +} + +// stackable + +func (pe *prepEncoder) push(in pushEncoder) { + pe.length += in.reserveLength() +} + +func (pe *prepEncoder) pop() error { + return nil +} + +// we do not record metrics during the prep encoder pass +func (pe *prepEncoder) metricRegistry() metrics.Registry { + return nil +} diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go new file mode 100644 index 0000000..40dc801 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_request.go @@ -0,0 +1,209 @@ +package sarama + +import "github.com/rcrowley/go-metrics" + +// RequiredAcks is used 
in Produce Requests to tell the broker how many replica acknowledgements
+// it must see before responding. Any of the constants defined here are valid. On broker versions
+// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
+// acknowledgements), but in 0.8.2.0 and later this will raise an exception (it has been replaced
+// by setting the `min.insync.replicas` value in the broker's configuration).
+type RequiredAcks int16
+
+const (
+	// NoResponse doesn't send any response; the TCP ACK is all you get.
+	NoResponse RequiredAcks = 0
+	// WaitForLocal waits for only the local commit to succeed before responding.
+	WaitForLocal RequiredAcks = 1
+	// WaitForAll waits for all in-sync replicas to commit before responding.
+	// The minimum number of in-sync replicas is configured on the broker via
+	// the `min.insync.replicas` configuration key.
+	WaitForAll RequiredAcks = -1
+)
+
+type ProduceRequest struct {
+	RequiredAcks RequiredAcks
+	Timeout      int32
+	Version      int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10
+	msgSets      map[string]map[int32]*MessageSet
+}
+
+func (r *ProduceRequest) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.RequiredAcks))
+	pe.putInt32(r.Timeout)
+	err := pe.putArrayLength(len(r.msgSets))
+	if err != nil {
+		return err
+	}
+	metricRegistry := pe.metricRegistry()
+	var batchSizeMetric metrics.Histogram
+	var compressionRatioMetric metrics.Histogram
+	if metricRegistry != nil {
+		batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
+		compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
+	}
+
+	totalRecordCount := int64(0)
+	for topic, partitions := range r.msgSets {
+		err = pe.putString(topic)
+		if err != nil {
+			return err
+		}
+		err = pe.putArrayLength(len(partitions))
+		if err != nil {
+			return err
+		}
+		topicRecordCount := int64(0)
+		var topicCompressionRatioMetric metrics.Histogram
+		if metricRegistry != nil {
+			topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
+		}
+		for id, msgSet := range partitions {
+			startOffset := pe.offset()
+			pe.putInt32(id)
+			pe.push(&lengthField{})
+			err = msgSet.encode(pe)
+			if err != nil {
+				return err
+			}
+			err = pe.pop()
+			if err != nil {
+				return err
+			}
+			if metricRegistry != nil {
+				for _, messageBlock := range msgSet.Messages {
+					// Is this a fake "message" wrapping real messages?
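+					// (a compressed batch travels as a single outer message
+					// whose Set field carries the real, inner messages)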
+					if messageBlock.Msg.Set != nil {
+						topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
+					} else {
+						// A single uncompressed message
+						topicRecordCount++
+					}
+					// Guard against a zero compressedSize when computing the compression ratio
+					if messageBlock.Msg.compressedSize != 0 {
+						compressionRatio := float64(len(messageBlock.Msg.Value)) /
+							float64(messageBlock.Msg.compressedSize)
+						// Histograms do not support decimal values, so multiply by 100 for better precision
+						intCompressionRatio := int64(100 * compressionRatio)
+						compressionRatioMetric.Update(intCompressionRatio)
+						topicCompressionRatioMetric.Update(intCompressionRatio)
+					}
+				}
+				batchSize := int64(pe.offset() - startOffset)
+				batchSizeMetric.Update(batchSize)
+				getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
+			}
+		}
+		if topicRecordCount > 0 {
+			getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
+			getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
+			totalRecordCount += topicRecordCount
+		}
+	}
+	if totalRecordCount > 0 {
+		metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
+		getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
+	}
+
+	return nil
+}
+
+func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
+	requiredAcks, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+	r.RequiredAcks = RequiredAcks(requiredAcks)
+	if r.Timeout, err = pd.getInt32(); err != nil {
+		return err
+	}
+	topicCount, err := pd.getArrayLength()
+	if err != nil {
+		return err
+	}
+	if topicCount == 0 {
+		return nil
+	}
+	r.msgSets = make(map[string]map[int32]*MessageSet)
+	for i := 0; i < topicCount; i++ {
+		topic, err := pd.getString()
+		if err != nil {
+			return err
+		}
+		partitionCount, err := pd.getArrayLength()
+		if err != nil {
+			return err
+		}
+		r.msgSets[topic] = make(map[int32]*MessageSet)
+		for j := 0; j < partitionCount; j++ {
+			partition, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			messageSetSize, err := pd.getInt32()
+			if err != nil {
+				return err
+			}
+			msgSetDecoder, err := pd.getSubset(int(messageSetSize))
+			if err != nil {
+				return err
+			}
+			msgSet := &MessageSet{}
+			err = msgSet.decode(msgSetDecoder)
+			if err != nil {
+				return err
+			}
+			r.msgSets[topic][partition] = msgSet
+		}
+	}
+	return nil
+}
+
+func (r *ProduceRequest) key() int16 {
+	return 0
+}
+
+func (r *ProduceRequest) version() int16 {
+	return r.Version
+}
+
+func (r *ProduceRequest) requiredVersion() KafkaVersion {
+	switch r.Version {
+	case 1:
+		return V0_9_0_0
+	case 2:
+		return V0_10_0_0
+	default:
+		return minVersion
+	}
+}
+
+func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
+	if r.msgSets == nil {
+		r.msgSets = make(map[string]map[int32]*MessageSet)
+	}
+
+	if r.msgSets[topic] == nil {
+		r.msgSets[topic] = make(map[int32]*MessageSet)
+	}
+
+	set := r.msgSets[topic][partition]
+
+	if set == nil {
+		set = new(MessageSet)
+		r.msgSets[topic][partition] = set
+	}
+
+	set.addMessage(msg)
+}
+
+func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
+	if r.msgSets == nil {
+		r.msgSets = make(map[string]map[int32]*MessageSet)
+	}
+
+	if r.msgSets[topic] == nil {
+		r.msgSets[topic] = make(map[int32]*MessageSet)
+	}
+
+	r.msgSets[topic][partition] = set
+}
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
new file mode
100644 index 0000000..3f05dd9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_response.go @@ -0,0 +1,159 @@ +package sarama + +import "time" + +type ProduceResponseBlock struct { + Err KError + Offset int64 + // only provided if Version >= 2 and the broker is configured with `LogAppendTime` + Timestamp time.Time +} + +func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) { + tmp, err := pd.getInt16() + if err != nil { + return err + } + b.Err = KError(tmp) + + b.Offset, err = pd.getInt64() + if err != nil { + return err + } + + if version >= 2 { + if millis, err := pd.getInt64(); err != nil { + return err + } else if millis != -1 { + b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond)) + } + } + + return nil +} + +type ProduceResponse struct { + Blocks map[string]map[int32]*ProduceResponseBlock + Version int16 + ThrottleTime time.Duration // only provided if Version >= 1 +} + +func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) { + r.Version = version + + numTopics, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics) + for i := 0; i < numTopics; i++ { + name, err := pd.getString() + if err != nil { + return err + } + + numBlocks, err := pd.getArrayLength() + if err != nil { + return err + } + + r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks) + + for j := 0; j < numBlocks; j++ { + id, err := pd.getInt32() + if err != nil { + return err + } + + block := new(ProduceResponseBlock) + err = block.decode(pd, version) + if err != nil { + return err + } + r.Blocks[name][id] = block + } + } + + if r.Version >= 1 { + millis, err := pd.getInt32() + if err != nil { + return err + } + + r.ThrottleTime = time.Duration(millis) * time.Millisecond + } + + return nil +} + +func (r *ProduceResponse) encode(pe packetEncoder) error { + err := pe.putArrayLength(len(r.Blocks)) + if err != nil { + return err + } + for topic, partitions := range r.Blocks { + err = pe.putString(topic) + if err != nil { + return err + } + err = pe.putArrayLength(len(partitions)) + if err != nil { + return err + } + for id, prb := range partitions { + pe.putInt32(id) + pe.putInt16(int16(prb.Err)) + pe.putInt64(prb.Offset) + } + } + if r.Version >= 1 { + pe.putInt32(int32(r.ThrottleTime / time.Millisecond)) + } + return nil +} + +func (r *ProduceResponse) key() int16 { + return 0 +} + +func (r *ProduceResponse) version() int16 { + return r.Version +} + +func (r *ProduceResponse) requiredVersion() KafkaVersion { + switch r.Version { + case 1: + return V0_9_0_0 + case 2: + return V0_10_0_0 + default: + return minVersion + } +} + +func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock { + if r.Blocks == nil { + return nil + } + + if r.Blocks[topic] == nil { + return nil + } + + return r.Blocks[topic][partition] +} + +// Testing API + +func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) { + if r.Blocks == nil { + r.Blocks = make(map[string]map[int32]*ProduceResponseBlock) + } + byTopic, ok := r.Blocks[topic] + if !ok { + byTopic = make(map[int32]*ProduceResponseBlock) + r.Blocks[topic] = byTopic + } + byTopic[partition] = &ProduceResponseBlock{Err: err} +} diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go new file mode 100644 index 0000000..158d9c4 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/produce_set.go @@ -0,0 
+1,176 @@ +package sarama + +import "time" + +type partitionSet struct { + msgs []*ProducerMessage + setToSend *MessageSet + bufferBytes int +} + +type produceSet struct { + parent *asyncProducer + msgs map[string]map[int32]*partitionSet + + bufferBytes int + bufferCount int +} + +func newProduceSet(parent *asyncProducer) *produceSet { + return &produceSet{ + msgs: make(map[string]map[int32]*partitionSet), + parent: parent, + } +} + +func (ps *produceSet) add(msg *ProducerMessage) error { + var err error + var key, val []byte + + if msg.Key != nil { + if key, err = msg.Key.Encode(); err != nil { + return err + } + } + + if msg.Value != nil { + if val, err = msg.Value.Encode(); err != nil { + return err + } + } + + partitions := ps.msgs[msg.Topic] + if partitions == nil { + partitions = make(map[int32]*partitionSet) + ps.msgs[msg.Topic] = partitions + } + + set := partitions[msg.Partition] + if set == nil { + set = &partitionSet{setToSend: new(MessageSet)} + partitions[msg.Partition] = set + } + + set.msgs = append(set.msgs, msg) + msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val} + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + if msg.Timestamp.IsZero() { + msgToSend.Timestamp = time.Now() + } else { + msgToSend.Timestamp = msg.Timestamp + } + msgToSend.Version = 1 + } + set.setToSend.addMessage(msgToSend) + + size := producerMessageOverhead + len(key) + len(val) + set.bufferBytes += size + ps.bufferBytes += size + ps.bufferCount++ + + return nil +} + +func (ps *produceSet) buildRequest() *ProduceRequest { + req := &ProduceRequest{ + RequiredAcks: ps.parent.conf.Producer.RequiredAcks, + Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond), + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + req.Version = 2 + } + + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + if ps.parent.conf.Producer.Compression == CompressionNone { + req.AddSet(topic, partition, set.setToSend) + } else { + // When compression is enabled, the entire set for each partition is compressed + // and sent as the payload of a single fake "message" with the appropriate codec + // set and no key. When the server sees a message with a compression codec, it + // decompresses the payload and treats the result as its message set. + payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry) + if err != nil { + Logger.Println(err) // if this happens, it's basically our fault. 
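+				// the message set was assembled by this produceSet, so an encode
+				// failure is a programmer error rather than a recoverable condition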
+ panic(err) + } + compMsg := &Message{ + Codec: ps.parent.conf.Producer.Compression, + Key: nil, + Value: payload, + Set: set.setToSend, // Provide the underlying message set for accurate metrics + } + if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) { + compMsg.Version = 1 + compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp + } + req.AddMessage(topic, partition, compMsg) + } + } + } + + return req +} + +func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) { + for topic, partitionSet := range ps.msgs { + for partition, set := range partitionSet { + cb(topic, partition, set.msgs) + } + } +} + +func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage { + if ps.msgs[topic] == nil { + return nil + } + set := ps.msgs[topic][partition] + if set == nil { + return nil + } + ps.bufferBytes -= set.bufferBytes + ps.bufferCount -= len(set.msgs) + delete(ps.msgs[topic], partition) + return set.msgs +} + +func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool { + switch { + // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety. + case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)): + return true + // Would we overflow the size-limit of a compressed message-batch for this partition? + case ps.parent.conf.Producer.Compression != CompressionNone && + ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil && + ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes: + return true + // Would we overflow simply in number of messages? + case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages: + return true + default: + return false + } +} + +func (ps *produceSet) readyToFlush() bool { + switch { + // If we don't have any messages, nothing else matters + case ps.empty(): + return false + // If all three config values are 0, we always flush as-fast-as-possible + case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0: + return true + // If we've passed the message trigger-point + case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages: + return true + // If we've passed the byte trigger-point + case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes: + return true + default: + return false + } +} + +func (ps *produceSet) empty() bool { + return ps.bufferCount == 0 +} diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go new file mode 100644 index 0000000..3cf9353 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_decoder.go @@ -0,0 +1,260 @@ +package sarama + +import ( + "encoding/binary" + "math" +) + +var errInvalidArrayLength = PacketDecodingError{"invalid array length"} +var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"} +var errInvalidStringLength = PacketDecodingError{"invalid string length"} +var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"} + +type realDecoder struct { + raw []byte + off int + stack []pushDecoder +} + +// primitives + +func (rd *realDecoder) getInt8() (int8, error) { + if rd.remaining() < 1 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int8(rd.raw[rd.off]) + rd.off++ + return tmp, nil +} + +func (rd 
*realDecoder) getInt16() (int16, error) { + if rd.remaining() < 2 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:])) + rd.off += 2 + return tmp, nil +} + +func (rd *realDecoder) getInt32() (int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + return tmp, nil +} + +func (rd *realDecoder) getInt64() (int64, error) { + if rd.remaining() < 8 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + return tmp, nil +} + +func (rd *realDecoder) getArrayLength() (int, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } + tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + if tmp > rd.remaining() { + rd.off = len(rd.raw) + return -1, ErrInsufficientData + } else if tmp > 2*math.MaxUint16 { + return -1, errInvalidArrayLength + } + return tmp, nil +} + +// collections + +func (rd *realDecoder) getBytes() ([]byte, error) { + tmp, err := rd.getInt32() + + if err != nil { + return nil, err + } + + n := int(tmp) + + switch { + case n < -1: + return nil, errInvalidByteSliceLength + case n == -1: + return nil, nil + case n == 0: + return make([]byte, 0), nil + case n > rd.remaining(): + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + tmpStr := rd.raw[rd.off : rd.off+n] + rd.off += n + return tmpStr, nil +} + +func (rd *realDecoder) getString() (string, error) { + tmp, err := rd.getInt16() + + if err != nil { + return "", err + } + + n := int(tmp) + + switch { + case n < -1: + return "", errInvalidStringLength + case n == -1: + return "", nil + case n == 0: + return "", nil + case n > rd.remaining(): + rd.off = len(rd.raw) + return "", ErrInsufficientData + } + + tmpStr := string(rd.raw[rd.off : rd.off+n]) + rd.off += n + return tmpStr, nil +} + +func (rd *realDecoder) getInt32Array() ([]int32, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 4*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int32, n) + for i := range ret { + ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + } + return ret, nil +} + +func (rd *realDecoder) getInt64Array() ([]int64, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if rd.remaining() < 8*n { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]int64, n) + for i := range ret { + ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:])) + rd.off += 8 + } + return ret, nil +} + +func (rd *realDecoder) getStringArray() ([]string, error) { + if rd.remaining() < 4 { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + n := int(binary.BigEndian.Uint32(rd.raw[rd.off:])) + rd.off += 4 + + if n == 0 { + return nil, nil + } + + if n < 0 { + return nil, errInvalidArrayLength + } + + ret := make([]string, n) + for i := range ret { + str, err := rd.getString() + if err != nil { + return nil, err + } + + ret[i] = str + } + return 
ret, nil +} + +// subsets + +func (rd *realDecoder) remaining() int { + return len(rd.raw) - rd.off +} + +func (rd *realDecoder) getSubset(length int) (packetDecoder, error) { + if length < 0 { + return nil, errInvalidSubsetSize + } else if length > rd.remaining() { + rd.off = len(rd.raw) + return nil, ErrInsufficientData + } + + start := rd.off + rd.off += length + return &realDecoder{raw: rd.raw[start:rd.off]}, nil +} + +// stacks + +func (rd *realDecoder) push(in pushDecoder) error { + in.saveOffset(rd.off) + + reserve := in.reserveLength() + if rd.remaining() < reserve { + rd.off = len(rd.raw) + return ErrInsufficientData + } + + rd.stack = append(rd.stack, in) + + rd.off += reserve + + return nil +} + +func (rd *realDecoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := rd.stack[len(rd.stack)-1] + rd.stack = rd.stack[:len(rd.stack)-1] + + return in.check(rd.off, rd.raw) +} diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go new file mode 100644 index 0000000..ced4267 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/real_encoder.go @@ -0,0 +1,129 @@ +package sarama + +import ( + "encoding/binary" + + "github.com/rcrowley/go-metrics" +) + +type realEncoder struct { + raw []byte + off int + stack []pushEncoder + registry metrics.Registry +} + +// primitives + +func (re *realEncoder) putInt8(in int8) { + re.raw[re.off] = byte(in) + re.off++ +} + +func (re *realEncoder) putInt16(in int16) { + binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in)) + re.off += 2 +} + +func (re *realEncoder) putInt32(in int32) { + binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in)) + re.off += 4 +} + +func (re *realEncoder) putInt64(in int64) { + binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in)) + re.off += 8 +} + +func (re *realEncoder) putArrayLength(in int) error { + re.putInt32(int32(in)) + return nil +} + +// collection + +func (re *realEncoder) putRawBytes(in []byte) error { + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putBytes(in []byte) error { + if in == nil { + re.putInt32(-1) + return nil + } + re.putInt32(int32(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putString(in string) error { + re.putInt16(int16(len(in))) + copy(re.raw[re.off:], in) + re.off += len(in) + return nil +} + +func (re *realEncoder) putStringArray(in []string) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + + for _, val := range in { + if err := re.putString(val); err != nil { + return err + } + } + + return nil +} + +func (re *realEncoder) putInt32Array(in []int32) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt32(val) + } + return nil +} + +func (re *realEncoder) putInt64Array(in []int64) error { + err := re.putArrayLength(len(in)) + if err != nil { + return err + } + for _, val := range in { + re.putInt64(val) + } + return nil +} + +func (re *realEncoder) offset() int { + return re.off +} + +// stacks + +func (re *realEncoder) push(in pushEncoder) { + in.saveOffset(re.off) + re.off += in.reserveLength() + re.stack = append(re.stack, in) +} + +func (re *realEncoder) pop() error { + // this is go's ugly pop pattern (the inverse of append) + in := re.stack[len(re.stack)-1] + re.stack = re.stack[:len(re.stack)-1] + + return in.run(re.off, re.raw) +} + +// we do record metrics during the real encoder pass +func (re 
*realEncoder) metricRegistry() metrics.Registry {
+	return re.registry
+}
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
new file mode 100644
index 0000000..73310ca
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/request.go
@@ -0,0 +1,119 @@
+package sarama
+
+import (
+	"encoding/binary"
+	"fmt"
+	"io"
+)
+
+type protocolBody interface {
+	encoder
+	versionedDecoder
+	key() int16
+	version() int16
+	requiredVersion() KafkaVersion
+}
+
+type request struct {
+	correlationID int32
+	clientID      string
+	body          protocolBody
+}
+
+func (r *request) encode(pe packetEncoder) (err error) {
+	pe.push(&lengthField{})
+	pe.putInt16(r.body.key())
+	pe.putInt16(r.body.version())
+	pe.putInt32(r.correlationID)
+	err = pe.putString(r.clientID)
+	if err != nil {
+		return err
+	}
+	err = r.body.encode(pe)
+	if err != nil {
+		return err
+	}
+	return pe.pop()
+}
+
+func (r *request) decode(pd packetDecoder) (err error) {
+	var key int16
+	if key, err = pd.getInt16(); err != nil {
+		return err
+	}
+	var version int16
+	if version, err = pd.getInt16(); err != nil {
+		return err
+	}
+	if r.correlationID, err = pd.getInt32(); err != nil {
+		return err
+	}
+	if r.clientID, err = pd.getString(); err != nil {
+		return err
+	}
+
+	r.body = allocateBody(key, version)
+	if r.body == nil {
+		return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
+	}
+	return r.body.decode(pd, version)
+}
+
+func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
+	lengthBytes := make([]byte, 4)
+	if _, err := io.ReadFull(r, lengthBytes); err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += len(lengthBytes)
+
+	length := int32(binary.BigEndian.Uint32(lengthBytes))
+	if length <= 4 || length > MaxRequestSize {
+		return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
+	}
+
+	encodedReq := make([]byte, length)
+	if _, err := io.ReadFull(r, encodedReq); err != nil {
+		return nil, bytesRead, err
+	}
+	bytesRead += len(encodedReq)
+
+	req = &request{}
+	if err := decode(encodedReq, req); err != nil {
+		return nil, bytesRead, err
+	}
+	return req, bytesRead, nil
+}
+
+func allocateBody(key, version int16) protocolBody {
+	switch key {
+	case 0:
+		return &ProduceRequest{}
+	case 1:
+		return &FetchRequest{}
+	case 2:
+		return &OffsetRequest{Version: version}
+	case 3:
+		return &MetadataRequest{}
+	case 8:
+		return &OffsetCommitRequest{Version: version}
+	case 9:
+		return &OffsetFetchRequest{}
+	case 10:
+		return &ConsumerMetadataRequest{}
+	case 11:
+		return &JoinGroupRequest{}
+	case 12:
+		return &HeartbeatRequest{}
+	case 13:
+		return &LeaveGroupRequest{}
+	case 14:
+		return &SyncGroupRequest{}
+	case 15:
+		return &DescribeGroupsRequest{}
+	case 16:
+		return &ListGroupsRequest{}
+	case 17:
+		return &SaslHandshakeRequest{}
+	case 18:
+		return &ApiVersionsRequest{}
+	}
+	return nil
+}
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
new file mode 100644
index 0000000..f3f4d27
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/response_header.go
@@ -0,0 +1,21 @@
+package sarama
+
+import "fmt"
+
+type responseHeader struct {
+	length        int32
+	correlationID int32
+}
+
+func (r *responseHeader) decode(pd packetDecoder) (err error) {
+	r.length, err = pd.getInt32()
+	if err != nil {
+		return err
+	}
+	if r.length <= 4 || r.length > MaxResponseSize {
+		return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
+	}
+
+
r.correlationID, err = pd.getInt32() + return err +} diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go new file mode 100644 index 0000000..7d5dc60 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sarama.go @@ -0,0 +1,99 @@ +/* +Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level +API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level +API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation. + +To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel +and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases. +The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be +useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees +depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the +SyncProducer can still sometimes be lost. + +To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic +consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the +https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 +and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support. + +For lower-level needs, the Broker and Request/Response objects permit precise control over each connection +and message sent on the wire; the Client provides higher-level metadata management that is shared between +the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up +exactly with the protocol fields documented by Kafka at +https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol + +Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry. 
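+
+A hedged sketch of reading those metrics back out of the registry (Registry.Each
+is part of the go-metrics API; everything else below is from this package):
+
+	config := NewConfig()
+	// ... use the config with a producer or consumer, then later:
+	config.MetricRegistry.Each(func(name string, metric interface{}) {
+		fmt.Println(name, metric)
+	})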
+ +Broker related metrics: + + +----------------------------------------------+------------+---------------------------------------------------------------+ + | Name | Type | Description | + +----------------------------------------------+------------+---------------------------------------------------------------+ + | incoming-byte-rate | meter | Bytes/second read off all brokers | + | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker | + | outgoing-byte-rate | meter | Bytes/second written off all brokers | + | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker | + | request-rate | meter | Requests/second sent to all brokers | + | request-rate-for-broker- | meter | Requests/second sent to a given broker | + | request-size | histogram | Distribution of the request size in bytes for all brokers | + | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker | + | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers | + | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker | + | response-rate | meter | Responses/second received from all brokers | + | response-rate-for-broker- | meter | Responses/second received from a given broker | + | response-size | histogram | Distribution of the response size in bytes for all brokers | + | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker | + +----------------------------------------------+------------+---------------------------------------------------------------+ + +Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics. + +Producer related metrics: + + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | Name | Type | Description | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics | + | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic | + | record-send-rate | meter | Records/second sent to all topics | + | record-send-rate-for-topic- | meter | Records/second sent to a given topic | + | records-per-request | histogram | Distribution of the number of records sent per request for all topics | + | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic | + | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics | + | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic | + +-------------------------------------------+------------+--------------------------------------------------------------------------------------+ + +*/ +package sarama + +import ( + "io/ioutil" + "log" +) + +// Logger is the instance of a StdLogger interface that Sarama writes connection +// management events to. By default it is set to discard all log messages via ioutil.Discard, +// but you can set it to redirect wherever you want. 
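+// A sketch of redirecting the logs to stderr (assuming the caller imports
+// "log" and "os"):
+//
+//	sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)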
+var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
+
+// StdLogger is used to log error messages.
+type StdLogger interface {
+	Print(v ...interface{})
+	Printf(format string, v ...interface{})
+	Println(v ...interface{})
+}
+
+// PanicHandler is called for recovering from panics spawned internally to the library (and thus
+// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
+var PanicHandler func(interface{})
+
+// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
+// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
+// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
+// to process.
+var MaxRequestSize int32 = 100 * 1024 * 1024
+
+// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
+// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
+// protect the client from running out of memory. Please note that brokers do not have any natural limit on
+// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
+// (see https://issues.apache.org/jira/browse/KAFKA-2063).
+var MaxResponseSize int32 = 100 * 1024 * 1024
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
new file mode 100644
index 0000000..fbbc894
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
@@ -0,0 +1,33 @@
+package sarama
+
+type SaslHandshakeRequest struct {
+	Mechanism string
+}
+
+func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
+	if err := pe.putString(r.Mechanism); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
+	if r.Mechanism, err = pd.getString(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeRequest) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeRequest) version() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
new file mode 100644
index 0000000..ef290d4
--- /dev/null
+++ b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
@@ -0,0 +1,38 @@
+package sarama
+
+type SaslHandshakeResponse struct {
+	Err               KError
+	EnabledMechanisms []string
+}
+
+func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
+	pe.putInt16(int16(r.Err))
+	return pe.putStringArray(r.EnabledMechanisms)
+}
+
+func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
+	kerr, err := pd.getInt16()
+	if err != nil {
+		return err
+	}
+
+	r.Err = KError(kerr)
+
+	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (r *SaslHandshakeResponse) key() int16 {
+	return 17
+}
+
+func (r *SaslHandshakeResponse) version() int16 {
+	return 0
+}
+
+func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
+	return V0_10_0_0
+}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
new file mode 100644
index 0000000..fe20708
--- /dev/null
+++
b/vendor/github.com/Shopify/sarama/sync_group_request.go @@ -0,0 +1,100 @@ +package sarama + +type SyncGroupRequest struct { + GroupId string + GenerationId int32 + MemberId string + GroupAssignments map[string][]byte +} + +func (r *SyncGroupRequest) encode(pe packetEncoder) error { + if err := pe.putString(r.GroupId); err != nil { + return err + } + + pe.putInt32(r.GenerationId) + + if err := pe.putString(r.MemberId); err != nil { + return err + } + + if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil { + return err + } + for memberId, memberAssignment := range r.GroupAssignments { + if err := pe.putString(memberId); err != nil { + return err + } + if err := pe.putBytes(memberAssignment); err != nil { + return err + } + } + + return nil +} + +func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) { + if r.GroupId, err = pd.getString(); err != nil { + return + } + if r.GenerationId, err = pd.getInt32(); err != nil { + return + } + if r.MemberId, err = pd.getString(); err != nil { + return + } + + n, err := pd.getArrayLength() + if err != nil { + return err + } + if n == 0 { + return nil + } + + r.GroupAssignments = make(map[string][]byte) + for i := 0; i < n; i++ { + memberId, err := pd.getString() + if err != nil { + return err + } + memberAssignment, err := pd.getBytes() + if err != nil { + return err + } + + r.GroupAssignments[memberId] = memberAssignment + } + + return nil +} + +func (r *SyncGroupRequest) key() int16 { + return 14 +} + +func (r *SyncGroupRequest) version() int16 { + return 0 +} + +func (r *SyncGroupRequest) requiredVersion() KafkaVersion { + return V0_9_0_0 +} + +func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) { + if r.GroupAssignments == nil { + r.GroupAssignments = make(map[string][]byte) + } + + r.GroupAssignments[memberId] = memberAssignment +} + +func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error { + bin, err := encode(memberAssignment, nil) + if err != nil { + return err + } + + r.AddGroupAssignment(memberId, bin) + return nil +} diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go new file mode 100644 index 0000000..194b382 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_group_response.go @@ -0,0 +1,41 @@ +package sarama + +type SyncGroupResponse struct { + Err KError + MemberAssignment []byte +} + +func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) { + assignment := new(ConsumerGroupMemberAssignment) + err := decode(r.MemberAssignment, assignment) + return assignment, err +} + +func (r *SyncGroupResponse) encode(pe packetEncoder) error { + pe.putInt16(int16(r.Err)) + return pe.putBytes(r.MemberAssignment) +} + +func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) { + kerr, err := pd.getInt16() + if err != nil { + return err + } + + r.Err = KError(kerr) + + r.MemberAssignment, err = pd.getBytes() + return +} + +func (r *SyncGroupResponse) key() int16 { + return 14 +} + +func (r *SyncGroupResponse) version() int16 { + return 0 +} + +func (r *SyncGroupResponse) requiredVersion() KafkaVersion { + return V0_9_0_0 +} diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go new file mode 100644 index 0000000..dd096b6 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/sync_producer.go @@ -0,0 +1,164 @@ +package 
sarama
+
+import "sync"
+
+// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
+// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
+// to avoid leaks: it may not be garbage-collected automatically when it passes out of scope.
+//
+// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
+// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
+// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
+//
+// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
+// be set to true in its configuration.
+type SyncProducer interface {
+
+	// SendMessage produces a given message, and returns only when it either has
+	// succeeded or failed to produce. It will return the partition and the offset
+	// of the produced message, or an error if the message failed to produce.
+	SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
+
+	// SendMessages produces a given set of messages, and returns only when all
+	// messages in the set have either succeeded or failed. Note that messages
+	// can succeed and fail individually; if some succeed and some fail,
+	// SendMessages will return an error.
+	SendMessages(msgs []*ProducerMessage) error
+
+	// Close shuts down the producer and waits for any buffered messages to be
+	// flushed. You must call this function before a producer object passes out of
+	// scope, as it may otherwise leak memory. You must call this before calling
+	// Close on the underlying client.
+	Close() error
+}
+
+type syncProducer struct {
+	producer *asyncProducer
+	wg       sync.WaitGroup
+}
+
+// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
+func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
+	if config == nil {
+		config = NewConfig()
+		config.Producer.Return.Successes = true
+	}
+
+	if err := verifyProducerConfig(config); err != nil {
+		return nil, err
+	}
+
+	p, err := NewAsyncProducer(addrs, config)
+	if err != nil {
+		return nil, err
+	}
+	return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
+}
+
+// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
+// necessary to call Close() on the underlying client when shutting down this producer.
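+//
+// A hedged sketch (the broker address is hypothetical; the config flag echoes
+// what verifyProducerConfig requires):
+//
+//	config := NewConfig()
+//	config.Producer.Return.Successes = true
+//	client, err := NewClient([]string{"localhost:9092"}, config)
+//	if err != nil {
+//		// handle err
+//	}
+//	defer client.Close()
+//
+//	producer, err := NewSyncProducerFromClient(client)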
+func NewSyncProducerFromClient(client Client) (SyncProducer, error) { + if err := verifyProducerConfig(client.Config()); err != nil { + return nil, err + } + + p, err := NewAsyncProducerFromClient(client) + if err != nil { + return nil, err + } + return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil +} + +func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer { + sp := &syncProducer{producer: p} + + sp.wg.Add(2) + go withRecover(sp.handleSuccesses) + go withRecover(sp.handleErrors) + + return sp +} + +func verifyProducerConfig(config *Config) error { + if !config.Producer.Return.Errors { + return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer") + } + if !config.Producer.Return.Successes { + return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer") + } + return nil +} + +func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) { + oldMetadata := msg.Metadata + defer func() { + msg.Metadata = oldMetadata + }() + + expectation := make(chan *ProducerError, 1) + msg.Metadata = expectation + sp.producer.Input() <- msg + + if err := <-expectation; err != nil { + return -1, -1, err.Err + } + + return msg.Partition, msg.Offset, nil +} + +func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error { + savedMetadata := make([]interface{}, len(msgs)) + for i := range msgs { + savedMetadata[i] = msgs[i].Metadata + } + defer func() { + for i := range msgs { + msgs[i].Metadata = savedMetadata[i] + } + }() + + expectations := make(chan chan *ProducerError, len(msgs)) + go func() { + for _, msg := range msgs { + expectation := make(chan *ProducerError, 1) + msg.Metadata = expectation + sp.producer.Input() <- msg + expectations <- expectation + } + close(expectations) + }() + + var errors ProducerErrors + for expectation := range expectations { + if err := <-expectation; err != nil { + errors = append(errors, err) + } + } + + if len(errors) > 0 { + return errors + } + return nil +} + +func (sp *syncProducer) handleSuccesses() { + defer sp.wg.Done() + for msg := range sp.producer.Successes() { + expectation := msg.Metadata.(chan *ProducerError) + expectation <- nil + } +} + +func (sp *syncProducer) handleErrors() { + defer sp.wg.Done() + for err := range sp.producer.Errors() { + expectation := err.Msg.Metadata.(chan *ProducerError) + expectation <- err + } +} + +func (sp *syncProducer) Close() error { + sp.producer.AsyncClose() + sp.wg.Wait() + return nil +} diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go new file mode 100644 index 0000000..dc0e7e9 --- /dev/null +++ b/vendor/github.com/Shopify/sarama/utils.go @@ -0,0 +1,150 @@ +package sarama + +import ( + "bufio" + "net" +) + +type none struct{} + +// make []int32 sortable so we can sort partition numbers +type int32Slice []int32 + +func (slice int32Slice) Len() int { + return len(slice) +} + +func (slice int32Slice) Less(i, j int) bool { + return slice[i] < slice[j] +} + +func (slice int32Slice) Swap(i, j int) { + slice[i], slice[j] = slice[j], slice[i] +} + +func dupInt32Slice(input []int32) []int32 { + ret := make([]int32, 0, len(input)) + for _, val := range input { + ret = append(ret, val) + } + return ret +} + +func withRecover(fn func()) { + defer func() { + handler := PanicHandler + if handler != nil { + if err := recover(); err != nil { + handler(err) + } + } + }() + + fn() +} + +func safeAsyncClose(b *Broker) { + tmp := b // local var prevents 
clobbering in goroutine
+	go withRecover(func() {
+		if connected, _ := tmp.Connected(); connected {
+			if err := tmp.Close(); err != nil {
+				Logger.Println("Error closing broker", tmp.ID(), ":", err)
+			}
+		}
+	})
+}
+
+// Encoder is a simple interface for any type that can be encoded as an array of bytes
+// in order to be sent as the key or value of a Kafka message. Length() is provided as an
+// optimization, and must return the same as len() on the result of Encode().
+type Encoder interface {
+	Encode() ([]byte, error)
+	Length() int
+}
+
+// make strings and byte slices encodable for convenience so they can be used as keys
+// and/or values in Kafka messages
+
+// StringEncoder implements the Encoder interface for Go strings so that they can be used
+// as the Key or Value in a ProducerMessage.
+type StringEncoder string
+
+func (s StringEncoder) Encode() ([]byte, error) {
+	return []byte(s), nil
+}
+
+func (s StringEncoder) Length() int {
+	return len(s)
+}
+
+// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
+// as the Key or Value in a ProducerMessage.
+type ByteEncoder []byte
+
+func (b ByteEncoder) Encode() ([]byte, error) {
+	return b, nil
+}
+
+func (b ByteEncoder) Length() int {
+	return len(b)
+}
+
+// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
+// reads that trigger syscalls.
+type bufConn struct {
+	net.Conn
+	buf *bufio.Reader
+}
+
+func newBufConn(conn net.Conn) *bufConn {
+	return &bufConn{
+		Conn: conn,
+		buf:  bufio.NewReader(conn),
+	}
+}
+
+func (bc *bufConn) Read(b []byte) (n int, err error) {
+	return bc.buf.Read(b)
+}
+
+// KafkaVersion instances represent versions of the upstream Kafka broker.
+type KafkaVersion struct {
+	// it's a struct rather than just typing the array directly to make it opaque and stop people
+	// generating their own arbitrary versions
+	version [4]uint
+}
+
+func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
+	return KafkaVersion{
+		version: [4]uint{major, minor, veryMinor, patch},
+	}
+}
+
+// IsAtLeast returns true if and only if the version it is called on is
+// greater than or equal to the version passed in:
+//	V1.IsAtLeast(V2) // false
+//	V2.IsAtLeast(V1) // true
+func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
+	for i := range v.version {
+		if v.version[i] > other.version[i] {
+			return true
+		} else if v.version[i] < other.version[i] {
+			return false
+		}
+	}
+	return true
+}
+
+// Effective constants defining the supported Kafka versions.
+var (
+	V0_8_2_0   = newKafkaVersion(0, 8, 2, 0)
+	V0_8_2_1   = newKafkaVersion(0, 8, 2, 1)
+	V0_8_2_2   = newKafkaVersion(0, 8, 2, 2)
+	V0_9_0_0   = newKafkaVersion(0, 9, 0, 0)
+	V0_9_0_1   = newKafkaVersion(0, 9, 0, 1)
+	V0_10_0_0  = newKafkaVersion(0, 10, 0, 0)
+	V0_10_0_1  = newKafkaVersion(0, 10, 0, 1)
+	V0_10_1_0  = newKafkaVersion(0, 10, 1, 0)
+	V0_10_2_0  = newKafkaVersion(0, 10, 2, 0)
+	minVersion = V0_8_2_0
+)
diff --git a/vendor/github.com/Xe/ln/doc.go b/vendor/github.com/Xe/ln/doc.go
new file mode 100644
index 0000000..ab81c3c
--- /dev/null
+++ b/vendor/github.com/Xe/ln/doc.go
@@ -0,0 +1,25 @@
+/*
+Package ln is the Natural Logger for Go.
+
+`ln` provides a simple interface to logging and metrics, and obviates the
+need for purpose-built metrics packages like `go-metrics` in simple use
+cases.
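+
+A minimal usage sketch (the key names are arbitrary examples, not part of
+the API):
+
+	ln.Log(context.Background(), ln.F{"event": "user_login", "user_id": 42})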
+
+The design of `ln` centers around the idea of key-value pairs, which can be
+interpreted on the fly by "Filters" to do things such as aggregate metrics
+and report them to, say, Librato or statsd.
+
+"Filters" are like WSGI or Rack middleware. They are run "top down", and
+each one can abort an emitted log's output at any time or let it continue
+through the chain. Rather than encapsulating the chain with partial function
+application, each filter defines an `Apply` function, which takes the log
+event as an argument and performs the work of the filter, but only if the
+filter applies to the event.
+
+If `Apply` returns `false`, the iteration through the rest of the filters is
+aborted, and the log is dropped from further processing.
+*/
+package ln
diff --git a/vendor/github.com/Xe/ln/filter.go b/vendor/github.com/Xe/ln/filter.go
new file mode 100644
index 0000000..4f2d006
--- /dev/null
+++ b/vendor/github.com/Xe/ln/filter.go
@@ -0,0 +1,67 @@
+package ln
+
+import (
+	"context"
+	"io"
+	"sync"
+)
+
+// Filter is the interface for defining chain filters.
+type Filter interface {
+	Apply(ctx context.Context, e Event) bool
+	Run()
+	Close()
+}
+
+// FilterFunc allows simple functions to implement the Filter interface.
+type FilterFunc func(ctx context.Context, e Event) bool
+
+// Apply implements the Filter interface.
+func (ff FilterFunc) Apply(ctx context.Context, e Event) bool {
+	return ff(ctx, e)
+}
+
+// Run implements the Filter interface.
+func (ff FilterFunc) Run() {}
+
+// Close implements the Filter interface.
+func (ff FilterFunc) Close() {}
+
+// WriterFilter implements a Filter that writes formatted events to an io.Writer.
+type WriterFilter struct {
+	sync.Mutex
+	Out       io.Writer
+	Formatter Formatter
+}
+
+// NewWriterFilter creates a filter to add to the chain.
+func NewWriterFilter(out io.Writer, format Formatter) *WriterFilter {
+	if format == nil {
+		format = DefaultFormatter
+	}
+	return &WriterFilter{
+		Out:       out,
+		Formatter: format,
+	}
+}
+
+// Apply implements the Filter interface.
+func (w *WriterFilter) Apply(ctx context.Context, e Event) bool {
+	output, err := w.Formatter.Format(ctx, e)
+	if err == nil {
+		w.Lock()
+		w.Out.Write(output)
+		w.Unlock()
+	}
+
+	return true
+}
+
+// Run implements the Filter interface.
+func (w *WriterFilter) Run() {}
+
+// Close implements the Filter interface.
+func (w *WriterFilter) Close() {}
+
+// NilFilter is safe to return as a Filter, but does nothing.
+var NilFilter = FilterFunc(func(_ context.Context, e Event) bool { return true })
diff --git a/vendor/github.com/Xe/ln/formatter.go b/vendor/github.com/Xe/ln/formatter.go
new file mode 100644
index 0000000..70313fc
--- /dev/null
+++ b/vendor/github.com/Xe/ln/formatter.go
@@ -0,0 +1,111 @@
+package ln
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"time"
+)
+
+var (
+	// DefaultTimeFormat represents the way in which time will be formatted by default.
+	DefaultTimeFormat = time.RFC3339
+)
+
+// Formatter defines the formatting of events.
+type Formatter interface {
+	Format(ctx context.Context, e Event) ([]byte, error)
+}
+
+// DefaultFormatter is the default way in which to format events.
+var DefaultFormatter Formatter
+
+func init() {
+	DefaultFormatter = NewTextFormatter()
+}
+
+// TextFormatter formats events as key-value pairs. Any message text not
+// wrapped in an instance of `F` is placed at the end as an `_msg` field.
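+//
+// A hedged sample of the output Format produces (timestamp abbreviated):
+//
+//	time="2017-08-25T01:22:58Z" user_id=42 _msg="user login"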
+type TextFormatter struct {
+	TimeFormat string
+}
+
+// NewTextFormatter returns a Formatter that outputs as text.
+func NewTextFormatter() Formatter {
+	return &TextFormatter{TimeFormat: DefaultTimeFormat}
+}
+
+// Format implements the Formatter interface.
+func (t *TextFormatter) Format(_ context.Context, e Event) ([]byte, error) {
+	var writer bytes.Buffer
+
+	writer.WriteString("time=\"")
+	writer.WriteString(e.Time.Format(t.TimeFormat))
+	writer.WriteString("\"")
+
+	keys := make([]string, len(e.Data))
+	i := 0
+
+	for k := range e.Data {
+		keys[i] = k
+		i++
+	}
+
+	for _, k := range keys {
+		v := e.Data[k]
+
+		writer.WriteByte(' ')
+		if shouldQuote(k) {
+			writer.WriteString(fmt.Sprintf("%q", k))
+		} else {
+			writer.WriteString(k)
+		}
+
+		writer.WriteByte('=')
+
+		switch v.(type) {
+		case string:
+			vs, _ := v.(string)
+			if shouldQuote(vs) {
+				fmt.Fprintf(&writer, "%q", vs)
+			} else {
+				writer.WriteString(vs)
+			}
+		case error:
+			tmperr, _ := v.(error)
+			es := tmperr.Error()
+
+			if shouldQuote(es) {
+				fmt.Fprintf(&writer, "%q", es)
+			} else {
+				writer.WriteString(es)
+			}
+		case time.Time:
+			tmptime, _ := v.(time.Time)
+			writer.WriteString(tmptime.Format(time.RFC3339))
+		default:
+			fmt.Fprint(&writer, v)
+		}
+	}
+
+	if len(e.Message) > 0 {
+		fmt.Fprintf(&writer, " _msg=%q", e.Message)
+	}
+
+	writer.WriteByte('\n')
+	return writer.Bytes(), nil
+}
+
+// shouldQuote reports whether s contains any character outside the safe,
+// unquoted set (letters, digits, and -.#/_).
+func shouldQuote(s string) bool {
+	for _, b := range s {
+		if !((b >= 'A' && b <= 'Z') ||
+			(b >= 'a' && b <= 'z') ||
+			(b >= '0' && b <= '9') ||
+			(b == '-' || b == '.' || b == '#' ||
+				b == '/' || b == '_')) {
+			return true
+		}
+	}
+	return false
+}
diff --git a/vendor/github.com/Xe/ln/logger.go b/vendor/github.com/Xe/ln/logger.go
new file mode 100644
index 0000000..1d07557
--- /dev/null
+++ b/vendor/github.com/Xe/ln/logger.go
@@ -0,0 +1,175 @@
+package ln
+
+import (
+	"context"
+	"os"
+	"time"
+
+	"github.com/pkg/errors"
+)
+
+// Logger holds the current priority and list of filters.
+type Logger struct {
+	Filters []Filter
+}
+
+// DefaultLogger is the default implementation of Logger.
+var DefaultLogger *Logger
+
+func init() {
+	var defaultFilters []Filter
+
+	// Log to stderr by default; setting LN_OUT to any value switches
+	// output to stdout.
+	out := os.Stdout
+	if os.Getenv("LN_OUT") == "" {
+		out = os.Stderr
+	}
+
+	defaultFilters = append(defaultFilters, NewWriterFilter(out, nil))
+
+	DefaultLogger = &Logger{
+		Filters: defaultFilters,
+	}
+}
+
+// F is a key-value mapping for structured data.
+type F map[string]interface{}
+
+// Extend concatenates one F with one or many Fer instances.
+func (f F) Extend(other ...Fer) {
+	for _, ff := range other {
+		for k, v := range ff.F() {
+			f[k] = v
+		}
+	}
+}
+
+// F makes F an Fer.
+func (f F) F() F {
+	return f
+}
+
+// Fer allows any type to add fields to the structured logging key->value pairs.
+type Fer interface {
+	F() F
+}
+
+// Event represents an event.
+type Event struct {
+	Time    time.Time
+	Data    F
+	Message string
+}
+
+// Log is the generic logging method.
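+// A sketch of a call site (the field names are arbitrary examples):
+//
+//	logger.Log(ctx, F{"action": "dial", "attempt": 3})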
+func (l *Logger) Log(ctx context.Context, xs ...Fer) {
+	event := Event{Time: time.Now()}
+
+	addF := func(bf F) {
+		if event.Data == nil {
+			event.Data = bf
+		} else {
+			for k, v := range bf {
+				event.Data[k] = v
+			}
+		}
+	}
+
+	for _, f := range xs {
+		addF(f.F())
+	}
+
+	if os.Getenv("LN_DEBUG_ALL_EVENTS") == "1" {
+		frame := callersFrame()
+		if event.Data == nil {
+			event.Data = make(F)
+		}
+		event.Data["_lineno"] = frame.lineno
+		event.Data["_function"] = frame.function
+		event.Data["_filename"] = frame.filename
+	}
+
+	l.filter(ctx, event)
+}
+
+func (l *Logger) filter(ctx context.Context, e Event) {
+	for _, f := range l.Filters {
+		if !f.Apply(ctx, e) {
+			return
+		}
+	}
+}
+
+// Error logs an error and information about the context of said error.
+func (l *Logger) Error(ctx context.Context, err error, xs ...Fer) {
+	data := F{}
+	frame := callersFrame()
+
+	data["_lineno"] = frame.lineno
+	data["_function"] = frame.function
+	data["_filename"] = frame.filename
+	data["err"] = err
+
+	cause := errors.Cause(err)
+	if cause != nil {
+		data["cause"] = cause.Error()
+	}
+
+	xs = append(xs, data)
+
+	l.Log(ctx, xs...)
+}
+
+// Fatal logs this set of values, then exits with status code 1.
+func (l *Logger) Fatal(ctx context.Context, xs ...Fer) {
+	xs = append(xs, F{"fatal": true})
+
+	l.Log(ctx, xs...)
+
+	os.Exit(1)
+}
+
+// FatalErr combines Fatal and Error.
+func (l *Logger) FatalErr(ctx context.Context, err error, xs ...Fer) {
+	xs = append(xs, F{"fatal": true})
+
+	data := F{}
+	frame := callersFrame()
+
+	data["_lineno"] = frame.lineno
+	data["_function"] = frame.function
+	data["_filename"] = frame.filename
+	data["err"] = err
+
+	cause := errors.Cause(err)
+	if cause != nil {
+		data["cause"] = cause.Error()
+	}
+
+	xs = append(xs, data)
+	l.Log(ctx, xs...)
+
+	os.Exit(1)
+}
+
+// Default Implementation
+
+// Log is the generic logging method.
+func Log(ctx context.Context, xs ...Fer) {
+	DefaultLogger.Log(ctx, xs...)
+}
+
+// Error logs an error and information about the context of said error.
+func Error(ctx context.Context, err error, xs ...Fer) {
+	DefaultLogger.Error(ctx, err, xs...)
+}
+
+// Fatal logs this set of values, then exits with status code 1.
+func Fatal(ctx context.Context, xs ...Fer) {
+	DefaultLogger.Fatal(ctx, xs...)
+}
+
+// FatalErr combines Fatal and Error.
+func FatalErr(ctx context.Context, err error, xs ...Fer) {
+	DefaultLogger.FatalErr(ctx, err, xs...)
+}
diff --git a/vendor/github.com/Xe/ln/stack.go b/vendor/github.com/Xe/ln/stack.go
new file mode 100644
index 0000000..1cf1e7a
--- /dev/null
+++ b/vendor/github.com/Xe/ln/stack.go
@@ -0,0 +1,44 @@
+package ln
+
+import (
+	"os"
+	"runtime"
+	"strings"
+)
+
+type frame struct {
+	filename string
+	function string
+	lineno   int
+}
+
+// callersFrame skips 3 frames with runtime.Caller: this function, the Logger
+// method that called it, and the package-level wrapper around that method,
+// landing on the user's call site.
+func callersFrame() frame {
+	var out frame
+	pc, file, line, ok := runtime.Caller(3)
+	if !ok {
+		return out
+	}
+	srcLoc := strings.LastIndex(file, "/src/")
+	if srcLoc >= 0 {
+		file = file[srcLoc+5:]
+	}
+	out.filename = file
+	out.function = functionName(pc)
+	out.lineno = line
+
+	return out
+}
+
+func functionName(pc uintptr) string {
+	fn := runtime.FuncForPC(pc)
+	if fn == nil {
+		return "???"
+ } + name := fn.Name() + beg := strings.LastIndex(name, string(os.PathSeparator)) + return name[beg+1:] + // end := strings.LastIndex(name, string(os.PathSeparator)) + // return name[end+1 : len(name)] +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go new file mode 100644 index 0000000..6655cc5 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/application_exception.go @@ -0,0 +1,142 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +const ( + UNKNOWN_APPLICATION_EXCEPTION = 0 + UNKNOWN_METHOD = 1 + INVALID_MESSAGE_TYPE_EXCEPTION = 2 + WRONG_METHOD_NAME = 3 + BAD_SEQUENCE_ID = 4 + MISSING_RESULT = 5 + INTERNAL_ERROR = 6 + PROTOCOL_ERROR = 7 +) + +// Application level Thrift exception +type TApplicationException interface { + TException + TypeId() int32 + Read(iprot TProtocol) (TApplicationException, error) + Write(oprot TProtocol) error +} + +type tApplicationException struct { + message string + type_ int32 +} + +func (e tApplicationException) Error() string { + return e.message +} + +func NewTApplicationException(type_ int32, message string) TApplicationException { + return &tApplicationException{message, type_} +} + +func (p *tApplicationException) TypeId() int32 { + return p.type_ +} + +func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) { + _, err := iprot.ReadStructBegin() + if err != nil { + return nil, err + } + + message := "" + type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) + + for { + _, ttype, id, err := iprot.ReadFieldBegin() + if err != nil { + return nil, err + } + if ttype == STOP { + break + } + switch id { + case 1: + if ttype == STRING { + if message, err = iprot.ReadString(); err != nil { + return nil, err + } + } else { + if err = SkipDefaultDepth(iprot, ttype); err != nil { + return nil, err + } + } + case 2: + if ttype == I32 { + if type_, err = iprot.ReadI32(); err != nil { + return nil, err + } + } else { + if err = SkipDefaultDepth(iprot, ttype); err != nil { + return nil, err + } + } + default: + if err = SkipDefaultDepth(iprot, ttype); err != nil { + return nil, err + } + } + if err = iprot.ReadFieldEnd(); err != nil { + return nil, err + } + } + return NewTApplicationException(type_, message), iprot.ReadStructEnd() +} + +func (p *tApplicationException) Write(oprot TProtocol) (err error) { + err = oprot.WriteStructBegin("TApplicationException") + if len(p.Error()) > 0 { + err = oprot.WriteFieldBegin("message", STRING, 1) + if err != nil { + return + } + err = oprot.WriteString(p.Error()) + if err != nil { + return + } + err = oprot.WriteFieldEnd() + if err != nil { + return + } + } + err = oprot.WriteFieldBegin("type", I32, 2) + 
if err != nil { + return + } + err = oprot.WriteI32(p.type_) + if err != nil { + return + } + err = oprot.WriteFieldEnd() + if err != nil { + return + } + err = oprot.WriteFieldStop() + if err != nil { + return + } + err = oprot.WriteStructEnd() + return +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go new file mode 100644 index 0000000..690d341 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/binary_protocol.go @@ -0,0 +1,514 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +type TBinaryProtocol struct { + trans TRichTransport + origTransport TTransport + reader io.Reader + writer io.Writer + strictRead bool + strictWrite bool + buffer [64]byte +} + +type TBinaryProtocolFactory struct { + strictRead bool + strictWrite bool +} + +func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { + return NewTBinaryProtocol(t, false, true) +} + +func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { + p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite} + if et, ok := t.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(t) + } + p.reader = p.trans + p.writer = p.trans + return p +} + +func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { + return NewTBinaryProtocolFactory(false, true) +} + +func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { + return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite} +} + +func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { + return NewTBinaryProtocol(t, p.strictRead, p.strictWrite) +} + +/** + * Writing Methods + */ + +func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { + if p.strictWrite { + version := uint32(VERSION_1) | uint32(typeId) + e := p.WriteI32(int32(version)) + if e != nil { + return e + } + e = p.WriteString(name) + if e != nil { + return e + } + e = p.WriteI32(seqId) + return e + } else { + e := p.WriteString(name) + if e != nil { + return e + } + e = p.WriteByte(int8(typeId)) + if e != nil { + return e + } + e = p.WriteI32(seqId) + return e + } + return nil +} + +func (p *TBinaryProtocol) WriteMessageEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteStructBegin(name string) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + e := p.WriteByte(int8(typeId)) + if e != nil { + return e + } + e = 
p.WriteI16(id) + return e +} + +func (p *TBinaryProtocol) WriteFieldEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldStop() error { + e := p.WriteByte(STOP) + return e +} + +func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + e := p.WriteByte(int8(keyType)) + if e != nil { + return e + } + e = p.WriteByte(int8(valueType)) + if e != nil { + return e + } + e = p.WriteI32(int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteMapEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error { + e := p.WriteByte(int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteListEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error { + e := p.WriteByte(int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteSetEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteBool(value bool) error { + if value { + return p.WriteByte(1) + } + return p.WriteByte(0) +} + +func (p *TBinaryProtocol) WriteByte(value int8) error { + e := p.trans.WriteByte(byte(value)) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI16(value int16) error { + v := p.buffer[0:2] + binary.BigEndian.PutUint16(v, uint16(value)) + _, e := p.writer.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI32(value int32) error { + v := p.buffer[0:4] + binary.BigEndian.PutUint32(v, uint32(value)) + _, e := p.writer.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI64(value int64) error { + v := p.buffer[0:8] + binary.BigEndian.PutUint64(v, uint64(value)) + _, err := p.writer.Write(v) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteDouble(value float64) error { + return p.WriteI64(int64(math.Float64bits(value))) +} + +func (p *TBinaryProtocol) WriteString(value string) error { + e := p.WriteI32(int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.WriteString(value) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteBinary(value []byte) error { + e := p.WriteI32(int32(len(value))) + if e != nil { + return e + } + _, err := p.writer.Write(value) + return NewTProtocolException(err) +} + +/** + * Reading methods + */ + +func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + size, e := p.ReadI32() + if e != nil { + return "", typeId, 0, NewTProtocolException(e) + } + if size < 0 { + typeId = TMessageType(size & 0x0ff) + version := int64(int64(size) & VERSION_MASK) + if version != VERSION_1 { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) + } + name, e = p.ReadString() + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + seqId, e = p.ReadI32() + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + return name, typeId, seqId, nil + } + if p.strictRead { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) + } + name, e2 := p.readStringBody(size) + if e2 != nil { + return name, typeId, seqId, e2 + } + b, e3 := p.ReadByte() + if e3 != nil { + return name, typeId, seqId, e3 + } + typeId = TMessageType(b) + seqId, e4 := p.ReadI32() + if e4 != nil { + return name, typeId, seqId, 
e4 + } + return name, typeId, seqId, nil +} + +func (p *TBinaryProtocol) ReadMessageEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) { + return +} + +func (p *TBinaryProtocol) ReadStructEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) { + t, err := p.ReadByte() + typeId = TType(t) + if err != nil { + return name, typeId, seqId, err + } + if t != STOP { + seqId, err = p.ReadI16() + } + return name, typeId, seqId, err +} + +func (p *TBinaryProtocol) ReadFieldEnd() error { + return nil +} + +var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) + +func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) { + k, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + kType = TType(k) + v, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + vType = TType(v) + size32, e := p.ReadI32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return kType, vType, size, nil +} + +func (p *TBinaryProtocol) ReadMapEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) { + b, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + return +} + +func (p *TBinaryProtocol) ReadListEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) { + b, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return elemType, size, nil +} + +func (p *TBinaryProtocol) ReadSetEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadBool() (bool, error) { + b, e := p.ReadByte() + v := true + if b != 1 { + v = false + } + return v, e +} + +func (p *TBinaryProtocol) ReadByte() (int8, error) { + v, err := p.trans.ReadByte() + return int8(v), err +} + +func (p *TBinaryProtocol) ReadI16() (value int16, err error) { + buf := p.buffer[0:2] + err = p.readAll(buf) + value = int16(binary.BigEndian.Uint16(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI32() (value int32, err error) { + buf := p.buffer[0:4] + err = p.readAll(buf) + value = int32(binary.BigEndian.Uint32(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI64() (value int64, err error) { + buf := p.buffer[0:8] + err = p.readAll(buf) + value = int64(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadDouble() (value float64, err error) { + buf := p.buffer[0:8] + err = p.readAll(buf) + value = math.Float64frombits(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadString() (value string, err error) { + size, e := p.ReadI32() + if e != nil { + return "", e + } + if size < 0 { + err = invalidDataLength + return + } + + return p.readStringBody(size) +} + +func (p *TBinaryProtocol) ReadBinary() ([]byte, error) { + size, e := p.ReadI32() + if e != nil { + return nil, e + } + if size < 0 { + return 
nil, invalidDataLength + } + if uint64(size) > p.trans.RemainingBytes() { + return nil, invalidDataLength + } + + isize := int(size) + buf := make([]byte, isize) + _, err := io.ReadFull(p.trans, buf) + return buf, NewTProtocolException(err) +} + +func (p *TBinaryProtocol) Flush() (err error) { + return NewTProtocolException(p.trans.Flush()) +} + +func (p *TBinaryProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TBinaryProtocol) Transport() TTransport { + return p.origTransport +} + +func (p *TBinaryProtocol) readAll(buf []byte) error { + _, err := io.ReadFull(p.reader, buf) + return NewTProtocolException(err) +} + +const readLimit = 32768 + +func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { + if size < 0 { + return "", nil + } + if uint64(size) > p.trans.RemainingBytes() { + return "", invalidDataLength + } + + var ( + buf bytes.Buffer + e error + b []byte + ) + + switch { + case int(size) <= len(p.buffer): + b = p.buffer[:size] // avoids allocation for small reads + case int(size) < readLimit: + b = make([]byte, size) + default: + b = make([]byte, readLimit) + } + + for size > 0 { + _, e = io.ReadFull(p.trans, b) + buf.Write(b) + if e != nil { + break + } + size -= readLimit + if size < readLimit && size > 0 { + b = b[:size] + } + } + return buf.String(), NewTProtocolException(e) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go new file mode 100644 index 0000000..b754f92 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/buffered_transport.go @@ -0,0 +1,91 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bufio" +) + +type TBufferedTransportFactory struct { + size int +} + +type TBufferedTransport struct { + bufio.ReadWriter + tp TTransport +} + +func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return NewTBufferedTransport(trans, p.size), nil +} + +func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { + return &TBufferedTransportFactory{size: bufferSize} +} + +func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { + return &TBufferedTransport{ + ReadWriter: bufio.ReadWriter{ + Reader: bufio.NewReaderSize(trans, bufferSize), + Writer: bufio.NewWriterSize(trans, bufferSize), + }, + tp: trans, + } +} + +func (p *TBufferedTransport) IsOpen() bool { + return p.tp.IsOpen() +} + +func (p *TBufferedTransport) Open() (err error) { + return p.tp.Open() +} + +func (p *TBufferedTransport) Close() (err error) { + return p.tp.Close() +} + +func (p *TBufferedTransport) Read(b []byte) (int, error) { + n, err := p.ReadWriter.Read(b) + if err != nil { + p.ReadWriter.Reader.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Write(b []byte) (int, error) { + n, err := p.ReadWriter.Write(b) + if err != nil { + p.ReadWriter.Writer.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Flush() error { + if err := p.ReadWriter.Flush(); err != nil { + p.ReadWriter.Writer.Reset(p.tp) + return err + } + return p.tp.Flush() +} + +func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { + return p.tp.RemainingBytes() +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go new file mode 100644 index 0000000..2c729a2 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_go17.go @@ -0,0 +1,32 @@ +// +build go1.7 + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +type mockProcessor struct { + ProcessFunc func(in, out TProtocol) (bool, TException) +} + +func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { + return m.ProcessFunc(in, out) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go new file mode 100644 index 0000000..e6d0c4d --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/common_test_pre_go17.go @@ -0,0 +1,32 @@ +// +build !go1.7 + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "golang.org/x/net/context" + +type mockProcessor struct { + ProcessFunc func(in, out TProtocol) (bool, TException) +} + +func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { + return m.ProcessFunc(in, out) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go new file mode 100644 index 0000000..0bc5fdd --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/compact_protocol.go @@ -0,0 +1,815 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + COMPACT_PROTOCOL_ID = 0x082 + COMPACT_VERSION = 1 + COMPACT_VERSION_MASK = 0x1f + COMPACT_TYPE_MASK = 0x0E0 + COMPACT_TYPE_BITS = 0x07 + COMPACT_TYPE_SHIFT_AMOUNT = 5 +) + +type tCompactType byte + +const ( + COMPACT_BOOLEAN_TRUE = 0x01 + COMPACT_BOOLEAN_FALSE = 0x02 + COMPACT_BYTE = 0x03 + COMPACT_I16 = 0x04 + COMPACT_I32 = 0x05 + COMPACT_I64 = 0x06 + COMPACT_DOUBLE = 0x07 + COMPACT_BINARY = 0x08 + COMPACT_LIST = 0x09 + COMPACT_SET = 0x0A + COMPACT_MAP = 0x0B + COMPACT_STRUCT = 0x0C +) + +var ( + ttypeToCompactType map[TType]tCompactType +) + +func init() { + ttypeToCompactType = map[TType]tCompactType{ + STOP: STOP, + BOOL: COMPACT_BOOLEAN_TRUE, + BYTE: COMPACT_BYTE, + I16: COMPACT_I16, + I32: COMPACT_I32, + I64: COMPACT_I64, + DOUBLE: COMPACT_DOUBLE, + STRING: COMPACT_BINARY, + LIST: COMPACT_LIST, + SET: COMPACT_SET, + MAP: COMPACT_MAP, + STRUCT: COMPACT_STRUCT, + } +} + +type TCompactProtocolFactory struct{} + +func NewTCompactProtocolFactory() *TCompactProtocolFactory { + return &TCompactProtocolFactory{} +} + +func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTCompactProtocol(trans) +} + +type TCompactProtocol struct { + trans TRichTransport + origTransport TTransport + + // Used to keep track of the last field for the current and previous structs, + // so we can do the delta stuff. 
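+ // For example, writing fields with ids 1, 2, 3 in order yields a
+ // delta of 1 each time, so each field header fits in a single byte
+ // (delta<<4 | compact type); see writeFieldBeginInternal below.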
+ lastField []int + lastFieldId int + + // If we encounter a boolean field begin, save the TField here so it can + // have the value incorporated. + booleanFieldName string + booleanFieldId int16 + booleanFieldPending bool + + // If we read a field header, and it's a boolean field, save the boolean + // value here so that readBool can use it. + boolValue bool + boolValueIsNotNull bool + buffer [64]byte +} + +// Create a TCompactProtocol given a TTransport +func NewTCompactProtocol(trans TTransport) *TCompactProtocol { + p := &TCompactProtocol{origTransport: trans, lastField: []int{}} + if et, ok := trans.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(trans) + } + + return p + +} + +// +// Public Writing methods. +// + +// Write a message header to the wire. Compact Protocol messages contain the +// protocol version so we can migrate forwards in the future if need be. +func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { + err := p.writeByteDirect(COMPACT_PROTOCOL_ID) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) + if err != nil { + return NewTProtocolException(err) + } + _, err = p.writeVarint32(seqid) + if err != nil { + return NewTProtocolException(err) + } + e := p.WriteString(name) + return e + +} + +func (p *TCompactProtocol) WriteMessageEnd() error { return nil } + +// Write a struct begin. This doesn't actually put anything on the wire. We +// use it as an opportunity to put special placeholder markers on the field +// stack so we can get the field id deltas correct. +func (p *TCompactProtocol) WriteStructBegin(name string) error { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return nil +} + +// Write a struct end. This doesn't actually put anything on the wire. We use +// this as an opportunity to pop the last field from the current struct off +// of the field stack. +func (p *TCompactProtocol) WriteStructEnd() error { + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + if typeId == BOOL { + // we want to possibly include the value, so we'll wait. + p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true + return nil + } + _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF) + return NewTProtocolException(err) +} + +// The workhorse of writeFieldBegin. It has the option of doing a +// 'type override' of the type header. This is used specifically in the +// boolean field case. +func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) { + // short lastField = lastField_.pop(); + + // if there's a type override, use that. 
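+ // (WriteFieldBegin passes 0xFF as the sentinel for "no override";
+ // WriteBool passes the compact boolean value itself, folding the
+ // bool into the field header.)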
+ var typeToWrite byte + if typeOverride == 0xFF { + typeToWrite = byte(p.getCompactType(typeId)) + } else { + typeToWrite = typeOverride + } + // check if we can use delta encoding for the field id + fieldId := int(id) + written := 0 + if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { + // write them together + err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) + if err != nil { + return 0, err + } + } else { + // write them separate + err := p.writeByteDirect(typeToWrite) + if err != nil { + return 0, err + } + err = p.WriteI16(id) + written = 1 + 2 + if err != nil { + return 0, err + } + } + + p.lastFieldId = fieldId + // p.lastField.Push(field.id); + return written, nil +} + +func (p *TCompactProtocol) WriteFieldEnd() error { return nil } + +func (p *TCompactProtocol) WriteFieldStop() error { + err := p.writeByteDirect(STOP) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + if size == 0 { + err := p.writeByteDirect(0) + return NewTProtocolException(err) + } + _, err := p.writeVarint32(int32(size)) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapEnd() error { return nil } + +// Write a list header. +func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteListEnd() error { return nil } + +// Write a set header. +func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteSetEnd() error { return nil } + +func (p *TCompactProtocol) WriteBool(value bool) error { + v := byte(COMPACT_BOOLEAN_FALSE) + if value { + v = byte(COMPACT_BOOLEAN_TRUE) + } + if p.booleanFieldPending { + // we haven't written the field header yet + _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v) + p.booleanFieldPending = false + return NewTProtocolException(err) + } + // we're not part of a field, so just write the value. + err := p.writeByteDirect(v) + return NewTProtocolException(err) +} + +// Write a byte. Nothing to see here! +func (p *TCompactProtocol) WriteByte(value int8) error { + err := p.writeByteDirect(byte(value)) + return NewTProtocolException(err) +} + +// Write an I16 as a zigzag varint. +func (p *TCompactProtocol) WriteI16(value int16) error { + _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) + return NewTProtocolException(err) +} + +// Write an i32 as a zigzag varint. +func (p *TCompactProtocol) WriteI32(value int32) error { + _, err := p.writeVarint32(p.int32ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write an i64 as a zigzag varint. +func (p *TCompactProtocol) WriteI64(value int64) error { + _, err := p.writeVarint64(p.int64ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write a double to the wire as 8 bytes. +func (p *TCompactProtocol) WriteDouble(value float64) error { + buf := p.buffer[0:8] + binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) + _, err := p.trans.Write(buf) + return NewTProtocolException(err) +} + +// Write a string to the wire with a varint size preceding. 
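+// For example, "abc" is written as the varint byte 0x03 followed by
+// the three bytes of the string; the empty string is the single byte
+// 0x00.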
+func (p *TCompactProtocol) WriteString(value string) error {
+ _, e := p.writeVarint32(int32(len(value)))
+ if e != nil {
+ return NewTProtocolException(e)
+ }
+ _, e = p.trans.WriteString(value)
+ return NewTProtocolException(e)
+}
+
+// Write a byte array, using a varint for the size.
+func (p *TCompactProtocol) WriteBinary(bin []byte) error {
+ _, e := p.writeVarint32(int32(len(bin)))
+ if e != nil {
+ return NewTProtocolException(e)
+ }
+ if len(bin) > 0 {
+ _, e = p.trans.Write(bin)
+ return NewTProtocolException(e)
+ }
+ return nil
+}
+
+//
+// Reading methods.
+//
+
+// Read a message header.
+func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
+ protocolId, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ if protocolId != COMPACT_PROTOCOL_ID {
+ e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
+ return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
+ }
+
+ versionAndType, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ version := versionAndType & COMPACT_VERSION_MASK
+ typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
+ if version != COMPACT_VERSION {
+ e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
+ err = NewTProtocolExceptionWithType(BAD_VERSION, e)
+ return
+ }
+ seqId, e := p.readVarint32()
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+ name, err = p.ReadString()
+ return
+}
+
+func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
+
+// Read a struct begin. There's nothing on the wire for this, but it is our
+// opportunity to push a new struct begin marker onto the field stack.
+func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
+ p.lastField = append(p.lastField, p.lastFieldId)
+ p.lastFieldId = 0
+ return
+}
+
+// Doesn't actually consume any wire data, just removes the last field for
+// this struct from the field stack.
+func (p *TCompactProtocol) ReadStructEnd() error {
+ // consume the last field we read off the wire.
+ p.lastFieldId = p.lastField[len(p.lastField)-1]
+ p.lastField = p.lastField[:len(p.lastField)-1]
+ return nil
+}
+
+// Read a field header off the wire.
+func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
+ t, err := p.readByteDirect()
+ if err != nil {
+ return
+ }
+
+ // if it's a stop, then we can return immediately, as the struct is over.
+ if (t & 0x0f) == STOP {
+ return "", STOP, 0, nil
+ }
+
+ // mask off the 4 MSB of the type header. it could contain a field id delta.
+ modifier := int16((t & 0xf0) >> 4)
+ if modifier == 0 {
+ // not a delta. look ahead for the zigzag varint field id.
+ id, err = p.ReadI16()
+ if err != nil {
+ return
+ }
+ } else {
+ // has a delta. add the delta to the last read field id.
+ id = int16(p.lastFieldId) + modifier
+ }
+ typeId, e := p.getTType(tCompactType(t & 0x0f))
+ if e != nil {
+ err = NewTProtocolException(e)
+ return
+ }
+
+ // if this happens to be a boolean field, the value is encoded in the type
+ if p.isBoolType(t) {
+ // save the boolean value in a special instance variable.
+ p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
+ p.boolValueIsNotNull = true
+ }
+
+ // push the new field onto the field stack so we can keep the deltas going.
+ p.lastFieldId = int(id)
+ return
+}
+
+func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
+
+// Read a map header off the wire. 
If the size is zero, skip reading the key +// and value type. This means that 0-length maps will yield TMaps without the +// "correct" types. +func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { + size32, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + keyAndValueType := byte(STOP) + if size != 0 { + keyAndValueType, err = p.readByteDirect() + if err != nil { + return + } + } + keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) + valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) + return +} + +func (p *TCompactProtocol) ReadMapEnd() error { return nil } + +// Read a list header off the wire. If the list size is 0-14, the size will +// be packed into the element type header. If it's a longer list, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) { + size_and_type, err := p.readByteDirect() + if err != nil { + return + } + size = int((size_and_type >> 4) & 0x0f) + if size == 15 { + size2, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size2 < 0 { + err = invalidDataLength + return + } + size = int(size2) + } + elemType, e := p.getTType(tCompactType(size_and_type)) + if e != nil { + err = NewTProtocolException(e) + return + } + return +} + +func (p *TCompactProtocol) ReadListEnd() error { return nil } + +// Read a set header off the wire. If the set size is 0-14, the size will +// be packed into the element type header. If it's a longer set, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) { + return p.ReadListBegin() +} + +func (p *TCompactProtocol) ReadSetEnd() error { return nil } + +// Read a boolean off the wire. If this is a boolean field, the value should +// already have been read during readFieldBegin, so we'll just consume the +// pre-stored value. Otherwise, read a byte. +func (p *TCompactProtocol) ReadBool() (value bool, err error) { + if p.boolValueIsNotNull { + p.boolValueIsNotNull = false + return p.boolValue, nil + } + v, err := p.readByteDirect() + return v == COMPACT_BOOLEAN_TRUE, err +} + +// Read a single byte off the wire. Nothing interesting here. +func (p *TCompactProtocol) ReadByte() (int8, error) { + v, err := p.readByteDirect() + if err != nil { + return 0, NewTProtocolException(err) + } + return int8(v), err +} + +// Read an i16 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI16() (value int16, err error) { + v, err := p.ReadI32() + return int16(v), err +} + +// Read an i32 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI32() (value int32, err error) { + v, e := p.readVarint32() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt32(v) + return value, nil +} + +// Read an i64 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI64() (value int64, err error) { + v, e := p.readVarint64() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt64(v) + return value, nil +} + +// No magic here - just read a double off the wire. 
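+// The value is eight little-endian bytes holding the IEEE 754 bit
+// pattern, mirroring WriteDouble above.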
+func (p *TCompactProtocol) ReadDouble() (value float64, err error) { + longBits := p.buffer[0:8] + _, e := io.ReadFull(p.trans, longBits) + if e != nil { + return 0.0, NewTProtocolException(e) + } + return math.Float64frombits(p.bytesToUint64(longBits)), nil +} + +// Reads a []byte (via readBinary), and then UTF-8 decodes it. +func (p *TCompactProtocol) ReadString() (value string, err error) { + length, e := p.readVarint32() + if e != nil { + return "", NewTProtocolException(e) + } + if length < 0 { + return "", invalidDataLength + } + if uint64(length) > p.trans.RemainingBytes() { + return "", invalidDataLength + } + + if length == 0 { + return "", nil + } + var buf []byte + if length <= int32(len(p.buffer)) { + buf = p.buffer[0:length] + } else { + buf = make([]byte, length) + } + _, e = io.ReadFull(p.trans, buf) + return string(buf), NewTProtocolException(e) +} + +// Read a []byte from the wire. +func (p *TCompactProtocol) ReadBinary() (value []byte, err error) { + length, e := p.readVarint32() + if e != nil { + return nil, NewTProtocolException(e) + } + if length == 0 { + return []byte{}, nil + } + if length < 0 { + return nil, invalidDataLength + } + if uint64(length) > p.trans.RemainingBytes() { + return nil, invalidDataLength + } + + buf := make([]byte, length) + _, e = io.ReadFull(p.trans, buf) + return buf, NewTProtocolException(e) +} + +func (p *TCompactProtocol) Flush() (err error) { + return NewTProtocolException(p.trans.Flush()) +} + +func (p *TCompactProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TCompactProtocol) Transport() TTransport { + return p.origTransport +} + +// +// Internal writing methods +// + +// Abstract method for writing the start of lists and sets. List and sets on +// the wire differ only by the type indicator. +func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { + if size <= 14 { + return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) + } + err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) + if err != nil { + return 0, err + } + m, err := p.writeVarint32(int32(size)) + return 1 + m, err +} + +// Write an i32 as a varint. Results in 1-5 bytes on the wire. +// TODO(pomack): make a permanent buffer like writeVarint64? +func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { + i32buf := p.buffer[0:5] + idx := 0 + for { + if (n & ^0x7F) == 0 { + i32buf[idx] = byte(n) + idx++ + // p.writeByteDirect(byte(n)); + break + // return; + } else { + i32buf[idx] = byte((n & 0x7F) | 0x80) + idx++ + // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); + u := uint32(n) + n = int32(u >> 7) + } + } + return p.trans.Write(i32buf[0:idx]) +} + +// Write an i64 as a varint. Results in 1-10 bytes on the wire. +func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { + varint64out := p.buffer[0:10] + idx := 0 + for { + if (n & ^0x7F) == 0 { + varint64out[idx] = byte(n) + idx++ + break + } else { + varint64out[idx] = byte((n & 0x7F) | 0x80) + idx++ + u := uint64(n) + n = int64(u >> 7) + } + } + return p.trans.Write(varint64out[0:idx]) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. +func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { + return (l << 1) ^ (l >> 63) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. 
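+// For example: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, so values of small
+// magnitude (of either sign) fit in a single varint byte.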
+func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
+ return (n << 1) ^ (n >> 31)
+}
+
+func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
+ binary.LittleEndian.PutUint64(buf, n)
+}
+
+func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
+ binary.LittleEndian.PutUint64(buf, uint64(n))
+}
+
+// Writes a byte without any possibility of all that field header nonsense.
+// Used internally by other writing methods that know they need to write a byte.
+func (p *TCompactProtocol) writeByteDirect(b byte) error {
+ return p.trans.WriteByte(b)
+}
+
+// Writes an int as a single raw byte, with the same caveats as writeByteDirect.
+func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
+ return 1, p.writeByteDirect(byte(n))
+}
+
+//
+// Internal reading methods
+//
+
+// Read an i32 from the wire as a varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 5 bytes.
+func (p *TCompactProtocol) readVarint32() (int32, error) {
+ // if the wire contains the right stuff, this will just truncate the i64 we
+ // read and get us the right sign.
+ v, err := p.readVarint64()
+ return int32(v), err
+}
+
+// Read an i64 from the wire as a proper varint. The MSB of each byte is set
+// if there is another byte to follow. This can read up to 10 bytes.
+func (p *TCompactProtocol) readVarint64() (int64, error) {
+ shift := uint(0)
+ result := int64(0)
+ for {
+ b, err := p.readByteDirect()
+ if err != nil {
+ return 0, err
+ }
+ result |= int64(b&0x7f) << shift
+ if (b & 0x80) != 0x80 {
+ break
+ }
+ shift += 7
+ }
+ return result, nil
+}
+
+// Read a raw byte, unlike ReadByte, which reads a Thrift byte (i8).
+func (p *TCompactProtocol) readByteDirect() (byte, error) {
+ return p.trans.ReadByte()
+}
+
+//
+// encoding helpers
+//
+
+// Convert from zigzag int to int.
+func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
+ u := uint32(n)
+ return int32(u>>1) ^ -(n & 1)
+}
+
+// Convert from zigzag long to long.
+func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
+ u := uint64(n)
+ return int64(u>>1) ^ -(n & 1)
+}
+
+// bytesToInt64 reinterprets 8 little-endian bytes as an int64.
+func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
+ return int64(binary.LittleEndian.Uint64(b))
+}
+
+// bytesToUint64 reinterprets 8 little-endian bytes as a uint64.
+func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
+ return binary.LittleEndian.Uint64(b)
+}
+
+//
+// type testing and converting
+//
+
+func (p *TCompactProtocol) isBoolType(b byte) bool {
+ return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
+}
+
+// Given a tCompactType constant, convert it to its corresponding
+// TType value. 
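+// For example, COMPACT_BINARY maps back to STRING, and both
+// COMPACT_BOOLEAN_TRUE and COMPACT_BOOLEAN_FALSE map to BOOL;
+// unrecognized nibbles produce an error.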
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
+ switch byte(t) & 0x0f {
+ case STOP:
+ return STOP, nil
+ case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
+ return BOOL, nil
+ case COMPACT_BYTE:
+ return BYTE, nil
+ case COMPACT_I16:
+ return I16, nil
+ case COMPACT_I32:
+ return I32, nil
+ case COMPACT_I64:
+ return I64, nil
+ case COMPACT_DOUBLE:
+ return DOUBLE, nil
+ case COMPACT_BINARY:
+ return STRING, nil
+ case COMPACT_LIST:
+ return LIST, nil
+ case COMPACT_SET:
+ return SET, nil
+ case COMPACT_MAP:
+ return MAP, nil
+ case COMPACT_STRUCT:
+ return STRUCT, nil
+ }
+ return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f))
+}
+
+// Given a TType value, find the appropriate TCompactProtocol.Types constant.
+func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
+ return ttypeToCompactType[t]
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
new file mode 100644
index 0000000..d37252c
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/debug_protocol.go
@@ -0,0 +1,269 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+import (
+ "log"
+)
+
+type TDebugProtocol struct {
+ Delegate TProtocol
+ LogPrefix string
+}
+
+type TDebugProtocolFactory struct {
+ Underlying TProtocolFactory
+ LogPrefix string
+}
+
+func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
+ return &TDebugProtocolFactory{
+ Underlying: underlying,
+ LogPrefix: logPrefix,
+ }
+}
+
+func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
+ return &TDebugProtocol{
+ Delegate: t.Underlying.GetProtocol(trans),
+ LogPrefix: t.LogPrefix,
+ }
+}
+
+func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
+ err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid)
+ log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteMessageEnd() error {
+ err := tdp.Delegate.WriteMessageEnd()
+ log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteStructBegin(name string) error {
+ err := tdp.Delegate.WriteStructBegin(name)
+ log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteStructEnd() error {
+ err := tdp.Delegate.WriteStructEnd()
+ log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
+ err := tdp.Delegate.WriteFieldBegin(name, typeId, id)
+ log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id=%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteFieldEnd() error {
+ err := tdp.Delegate.WriteFieldEnd()
+ log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteFieldStop() error {
+ err := tdp.Delegate.WriteFieldStop()
+ log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
+ err := tdp.Delegate.WriteMapBegin(keyType, valueType, size)
+ log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteMapEnd() error {
+ err := tdp.Delegate.WriteMapEnd()
+ log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error {
+ err := tdp.Delegate.WriteListBegin(elemType, size)
+ log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteListEnd() error {
+ err := tdp.Delegate.WriteListEnd()
+ log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error {
+ err := tdp.Delegate.WriteSetBegin(elemType, size)
+ log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteSetEnd() error {
+ err := tdp.Delegate.WriteSetEnd()
+ log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteBool(value bool) error {
+ err := tdp.Delegate.WriteBool(value)
+ log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteByte(value int8) error {
+ 
err := tdp.Delegate.WriteByte(value)
+ log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteI16(value int16) error {
+ err := tdp.Delegate.WriteI16(value)
+ log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteI32(value int32) error {
+ err := tdp.Delegate.WriteI32(value)
+ log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteI64(value int64) error {
+ err := tdp.Delegate.WriteI64(value)
+ log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteDouble(value float64) error {
+ err := tdp.Delegate.WriteDouble(value)
+ log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteString(value string) error {
+ err := tdp.Delegate.WriteString(value)
+ log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
+ return err
+}
+func (tdp *TDebugProtocol) WriteBinary(value []byte) error {
+ err := tdp.Delegate.WriteBinary(value)
+ log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
+ return err
+}
+
+func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
+ name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin()
+ log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadMessageEnd() (err error) {
+ err = tdp.Delegate.ReadMessageEnd()
+ log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) {
+ name, err = tdp.Delegate.ReadStructBegin()
+ log.Printf("%sReadStructBegin() (name=%#v, err=%#v)", tdp.LogPrefix, name, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadStructEnd() (err error) {
+ err = tdp.Delegate.ReadStructEnd()
+ log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
+ name, typeId, id, err = tdp.Delegate.ReadFieldBegin()
+ log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadFieldEnd() (err error) {
+ err = tdp.Delegate.ReadFieldEnd()
+ log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
+ keyType, valueType, size, err = tdp.Delegate.ReadMapBegin()
+ log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadMapEnd() (err error) {
+ err = tdp.Delegate.ReadMapEnd()
+ log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) {
+ elemType, size, err = tdp.Delegate.ReadListBegin()
+ log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadListEnd() (err error) {
+ err = tdp.Delegate.ReadListEnd()
+ log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
+ return
+}
+func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) {
+ elemType, size, err = 
tdp.Delegate.ReadSetBegin() + log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) + return +} +func (tdp *TDebugProtocol) ReadSetEnd() (err error) { + err = tdp.Delegate.ReadSetEnd() + log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) + return +} +func (tdp *TDebugProtocol) ReadBool() (value bool, err error) { + value, err = tdp.Delegate.ReadBool() + log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadByte() (value int8, err error) { + value, err = tdp.Delegate.ReadByte() + log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadI16() (value int16, err error) { + value, err = tdp.Delegate.ReadI16() + log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadI32() (value int32, err error) { + value, err = tdp.Delegate.ReadI32() + log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadI64() (value int64, err error) { + value, err = tdp.Delegate.ReadI64() + log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) { + value, err = tdp.Delegate.ReadDouble() + log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadString() (value string, err error) { + value, err = tdp.Delegate.ReadString() + log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) { + value, err = tdp.Delegate.ReadBinary() + log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) { + err = tdp.Delegate.Skip(fieldType) + log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) + return +} +func (tdp *TDebugProtocol) Flush() (err error) { + err = tdp.Delegate.Flush() + log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err) + return +} + +func (tdp *TDebugProtocol) Transport() TTransport { + return tdp.Delegate.Transport() +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go new file mode 100644 index 0000000..91a0983 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/deserializer.go @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +type TDeserializer struct { + Transport TTransport + Protocol TProtocol +} + +func NewTDeserializer() *TDeserializer { + var transport TTransport + transport = NewTMemoryBufferLen(1024) + + protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) + + return &TDeserializer{ + transport, + protocol} +} + +func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) { + err = nil + if _, err = t.Transport.Write([]byte(s)); err != nil { + return + } + if err = msg.Read(t.Protocol); err != nil { + return + } + return +} + +func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) { + err = nil + if _, err = t.Transport.Write(b); err != nil { + return + } + if err = msg.Read(t.Protocol); err != nil { + return + } + return +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go new file mode 100644 index 0000000..ea8d6f6 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/exception.go @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" +) + +// Generic Thrift exception +type TException interface { + error +} + +// Prepends additional information to an error without losing the Thrift exception interface +func PrependError(prepend string, err error) error { + if t, ok := err.(TTransportException); ok { + return NewTTransportException(t.TypeId(), prepend+t.Error()) + } + if t, ok := err.(TProtocolException); ok { + return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error())) + } + if t, ok := err.(TApplicationException); ok { + return NewTApplicationException(t.TypeId(), prepend+t.Error()) + } + + return errors.New(prepend + err.Error()) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/field.go b/vendor/github.com/apache/thrift/lib/go/thrift/field.go new file mode 100644 index 0000000..9d66525 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/field.go @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Helper class that encapsulates field metadata. +type field struct { + name string + typeId TType + id int +} + +func newField(n string, t TType, i int) *field { + return &field{name: n, typeId: t, id: i} +} + +func (p *field) Name() string { + if p == nil { + return "" + } + return p.name +} + +func (p *field) TypeId() TType { + if p == nil { + return TType(VOID) + } + return p.typeId +} + +func (p *field) Id() int { + if p == nil { + return -1 + } + return p.id +} + +func (p *field) String() string { + if p == nil { + return "" + } + return "" +} + +var ANONYMOUS_FIELD *field + +type fieldSlice []field + +func (p fieldSlice) Len() int { + return len(p) +} + +func (p fieldSlice) Less(i, j int) bool { + return p[i].Id() < p[j].Id() +} + +func (p fieldSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func init() { + ANONYMOUS_FIELD = newField("", STOP, 0) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go new file mode 100644 index 0000000..60b1249 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/framed_transport.go @@ -0,0 +1,172 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "encoding/binary" + "fmt" + "io" +) + +const DEFAULT_MAX_LENGTH = 16384000 + +type TFramedTransport struct { + transport TTransport + buf bytes.Buffer + reader *bufio.Reader + frameSize uint32 //Current remaining size of the frame. 
if ==0 read next frame header + buffer [4]byte + maxLength uint32 +} + +type tFramedTransportFactory struct { + factory TTransportFactory + maxLength uint32 +} + +func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { + return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH} +} + +func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { + return &tFramedTransportFactory{factory: factory, maxLength: maxLength} +} + +func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { + tt, err := p.factory.GetTransport(base) + if err != nil { + return nil, err + } + return NewTFramedTransportMaxLength(tt, p.maxLength), nil +} + +func NewTFramedTransport(transport TTransport) *TFramedTransport { + return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH} +} + +func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { + return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength} +} + +func (p *TFramedTransport) Open() error { + return p.transport.Open() +} + +func (p *TFramedTransport) IsOpen() bool { + return p.transport.IsOpen() +} + +func (p *TFramedTransport) Close() error { + return p.transport.Close() +} + +func (p *TFramedTransport) Read(buf []byte) (l int, err error) { + if p.frameSize == 0 { + p.frameSize, err = p.readFrameHeader() + if err != nil { + return + } + } + if p.frameSize < uint32(len(buf)) { + frameSize := p.frameSize + tmp := make([]byte, p.frameSize) + l, err = p.Read(tmp) + copy(buf, tmp) + if err == nil { + err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf))) + return + } + } + got, err := p.reader.Read(buf) + p.frameSize = p.frameSize - uint32(got) + //sanity check + if p.frameSize < 0 { + return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size") + } + return got, NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) ReadByte() (c byte, err error) { + if p.frameSize == 0 { + p.frameSize, err = p.readFrameHeader() + if err != nil { + return + } + } + if p.frameSize < 1 { + return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1)) + } + c, err = p.reader.ReadByte() + if err == nil { + p.frameSize-- + } + return +} + +func (p *TFramedTransport) Write(buf []byte) (int, error) { + n, err := p.buf.Write(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) WriteByte(c byte) error { + return p.buf.WriteByte(c) +} + +func (p *TFramedTransport) WriteString(s string) (n int, err error) { + return p.buf.WriteString(s) +} + +func (p *TFramedTransport) Flush() error { + size := p.buf.Len() + buf := p.buffer[:4] + binary.BigEndian.PutUint32(buf, uint32(size)) + _, err := p.transport.Write(buf) + if err != nil { + p.buf.Truncate(0) + return NewTTransportExceptionFromError(err) + } + if size > 0 { + if n, err := p.buf.WriteTo(p.transport); err != nil { + print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n") + p.buf.Truncate(0) + return NewTTransportExceptionFromError(err) + } + } + err = p.transport.Flush() + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) readFrameHeader() (uint32, error) { + buf := p.buffer[:4] + if _, err := 
io.ReadFull(p.reader, buf); err != nil { + return 0, err + } + size := binary.BigEndian.Uint32(buf) + if size < 0 || size > p.maxLength { + return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) + } + return size, nil +} + +func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { + return uint64(p.frameSize) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/go17.go new file mode 100644 index 0000000..e3b21c4 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/go17.go @@ -0,0 +1,26 @@ +// +build go1.7 + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +var defaultCtx = context.Background() diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go new file mode 100644 index 0000000..33f2aa4 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_client.go @@ -0,0 +1,238 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +// Default to using the shared http client. Library users are +// free to change this global client or specify one through +// THttpClientOptions. 
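+// For example, to give a single transport its own timeout while leaving the
+// shared default untouched (a sketch; the URL is illustrative, and a real
+// program would also import "time"):
+//
+//	custom := &http.Client{Timeout: 10 * time.Second}
+//	trans, err := NewTHttpClientWithOptions(
+//		"http://localhost:9090/thrift",
+//		THttpClientOptions{Client: custom},
+//	)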
+var DefaultHttpClient *http.Client = http.DefaultClient + +type THttpClient struct { + client *http.Client + response *http.Response + url *url.URL + requestBuffer *bytes.Buffer + header http.Header + nsecConnectTimeout int64 + nsecReadTimeout int64 +} + +type THttpClientTransportFactory struct { + options THttpClientOptions + url string +} + +func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*THttpClient) + if ok && t.url != nil { + return NewTHttpClientWithOptions(t.url.String(), p.options) + } + } + return NewTHttpClientWithOptions(p.url, p.options) +} + +type THttpClientOptions struct { + // If nil, DefaultHttpClient is used + Client *http.Client +} + +func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) +} + +func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { + return &THttpClientTransportFactory{url: url, options: options} +} + +func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { + parsedURL, err := url.Parse(urlstr) + if err != nil { + return nil, err + } + buf := make([]byte, 0, 1024) + client := options.Client + if client == nil { + client = DefaultHttpClient + } + httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}} + return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil +} + +func NewTHttpClient(urlstr string) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) +} + +// Set the HTTP Header for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") +func (p *THttpClient) SetHeader(key string, value string) { + p.header.Add(key, value) +} + +// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// hdrValue := httpTrans.GetHeader("User-Agent") +func (p *THttpClient) GetHeader(key string) string { + return p.header.Get(key) +} + +// Deletes the HTTP Header given a Header Key for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// httpTrans.DelHeader("User-Agent") +func (p *THttpClient) DelHeader(key string) { + p.header.Del(key) +} + +func (p *THttpClient) Open() error { + // do nothing + return nil +} + +func (p *THttpClient) IsOpen() bool { + return p.response != nil || p.requestBuffer != nil +} + +func (p *THttpClient) closeResponse() error { + var err error + if p.response != nil && p.response.Body != nil { + // The docs specify that if keepalive is enabled and the response body is not + // read to completion the connection will never be returned to the pool and + // reused. Errors are being ignored here because if the connection is invalid + // and this fails for some reason, the Close() method will do any remaining + // cleanup. 
+ io.Copy(ioutil.Discard, p.response.Body) + + err = p.response.Body.Close() + } + + p.response = nil + return err +} + +func (p *THttpClient) Close() error { + if p.requestBuffer != nil { + p.requestBuffer.Reset() + p.requestBuffer = nil + } + return p.closeResponse() +} + +func (p *THttpClient) Read(buf []byte) (int, error) { + if p.response == nil { + return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") + } + n, err := p.response.Body.Read(buf) + if n > 0 && (err == nil || err == io.EOF) { + return n, nil + } + return n, NewTTransportExceptionFromError(err) +} + +func (p *THttpClient) ReadByte() (c byte, err error) { + return readByte(p.response.Body) +} + +func (p *THttpClient) Write(buf []byte) (int, error) { + n, err := p.requestBuffer.Write(buf) + return n, err +} + +func (p *THttpClient) WriteByte(c byte) error { + return p.requestBuffer.WriteByte(c) +} + +func (p *THttpClient) WriteString(s string) (n int, err error) { + return p.requestBuffer.WriteString(s) +} + +func (p *THttpClient) Flush() error { + // Close any previous response body to avoid leaking connections. + p.closeResponse() + + req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer) + if err != nil { + return NewTTransportExceptionFromError(err) + } + req.Header = p.header + response, err := p.client.Do(req) + if err != nil { + return NewTTransportExceptionFromError(err) + } + if response.StatusCode != http.StatusOK { + // Close the response to avoid leaking file descriptors. closeResponse does + // more than just call Close(), so temporarily assign it and reuse the logic. + p.response = response + p.closeResponse() + + // TODO(pomack) log bad response + return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) + } + p.response = response + return nil +} + +func (p *THttpClient) RemainingBytes() (num_bytes uint64) { + len := p.response.ContentLength + if len >= 0 { + return uint64(len) + } + + const maxSize = ^uint64(0) + return maxSize // the thruth is, we just don't know unless framed is used +} + +// Deprecated: Use NewTHttpClientTransportFactory instead. +func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) +} + +// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. +func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, options) +} + +// Deprecated: Use NewTHttpClientWithOptions instead. +func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, options) +} + +// Deprecated: Use NewTHttpClient instead. +func NewTHttpPostClient(urlstr string) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go new file mode 100644 index 0000000..601855b --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport.go @@ -0,0 +1,51 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "compress/gzip" + "io" + "net/http" + "strings" +) + +// gz transparently compresses the HTTP response if the client supports it. +func gz(handler http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { + handler(w, r) + return + } + w.Header().Set("Content-Encoding", "gzip") + gz := gzip.NewWriter(w) + defer gz.Close() + gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} + handler(gzw, r) + } +} + +type gzipResponseWriter struct { + io.Writer + http.ResponseWriter +} + +func (w gzipResponseWriter) Write(b []byte) (int, error) { + return w.Writer.Write(b) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go new file mode 100644 index 0000000..1313ac2 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_go17.go @@ -0,0 +1,38 @@ +// +build go1.7 + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "net/http" +) + +// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function +func NewThriftHandlerFunc(processor TProcessor, + inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { + + return gz(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/x-thrift") + + transport := NewStreamTransport(r.Body, w) + processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) + }) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go new file mode 100644 index 0000000..13aa1c1 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/http_transport_pre_go17.go @@ -0,0 +1,40 @@ +// +build !go1.7 + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "net/http" + + "golang.org/x/net/context" +) + +// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function +func NewThriftHandlerFunc(processor TProcessor, + inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { + + return gz(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/x-thrift") + + transport := NewStreamTransport(r.Body, w) + processor.Process(context.Background(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) + }) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go new file mode 100644 index 0000000..b18be81 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/iostream_transport.go @@ -0,0 +1,213 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bufio" + "io" +) + +// StreamTransport is a Transport made of an io.Reader and/or an io.Writer +type StreamTransport struct { + io.Reader + io.Writer + isReadWriter bool + closed bool +} + +type StreamTransportFactory struct { + Reader io.Reader + Writer io.Writer + isReadWriter bool +} + +func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*StreamTransport) + if ok { + if t.isReadWriter { + return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil + } + if t.Reader != nil && t.Writer != nil { + return NewStreamTransport(t.Reader, t.Writer), nil + } + if t.Reader != nil && t.Writer == nil { + return NewStreamTransportR(t.Reader), nil + } + if t.Reader == nil && t.Writer != nil { + return NewStreamTransportW(t.Writer), nil + } + return &StreamTransport{}, nil + } + } + if p.isReadWriter { + return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil + } + if p.Reader != nil && p.Writer != nil { + return NewStreamTransport(p.Reader, p.Writer), nil + } + if p.Reader != nil && p.Writer == nil { + return NewStreamTransportR(p.Reader), nil + } + if p.Reader == nil && p.Writer != nil { + return NewStreamTransportW(p.Writer), nil + } + return &StreamTransport{}, nil +} + +func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { + return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} +} + +func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportR(r io.Reader) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r)} +} + +func NewStreamTransportW(w io.Writer) *StreamTransport { + return &StreamTransport{Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { + bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) + return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} +} + +func (p *StreamTransport) IsOpen() bool { + return !p.closed +} + +// implicitly opened on creation, can't be reopened once closed +func (p *StreamTransport) Open() error { + if !p.closed { + return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") + } else { + return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") + } +} + +// Closes both the input and output streams. +func (p *StreamTransport) Close() error { + if p.closed { + return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") + } + p.closed = true + closedReader := false + if p.Reader != nil { + c, ok := p.Reader.(io.Closer) + if ok { + e := c.Close() + closedReader = true + if e != nil { + return e + } + } + p.Reader = nil + } + if p.Writer != nil && (!closedReader || !p.isReadWriter) { + c, ok := p.Writer.(io.Closer) + if ok { + e := c.Close() + if e != nil { + return e + } + } + p.Writer = nil + } + return nil +} + +// Flushes the underlying output stream if not null. 
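+// bufio.Writer has a Flush() error method and therefore satisfies the
+// Flusher check below (Flusher is declared elsewhere in this package), so
+// transports built with NewStreamTransport/NewStreamTransportW, which wrap
+// the writer in a bufio.Writer, do flush their buffered bytes here.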
+func (p *StreamTransport) Flush() error { + if p.Writer == nil { + return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") + } + f, ok := p.Writer.(Flusher) + if ok { + err := f.Flush() + if err != nil { + return NewTTransportExceptionFromError(err) + } + } + return nil +} + +func (p *StreamTransport) Read(c []byte) (n int, err error) { + n, err = p.Reader.Read(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) ReadByte() (c byte, err error) { + f, ok := p.Reader.(io.ByteReader) + if ok { + c, err = f.ReadByte() + } else { + c, err = readByte(p.Reader) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) Write(c []byte) (n int, err error) { + n, err = p.Writer.Write(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteByte(c byte) (err error) { + f, ok := p.Writer.(io.ByteWriter) + if ok { + err = f.WriteByte(c) + } else { + err = writeByte(p.Writer, c) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteString(s string) (n int, err error) { + f, ok := p.Writer.(stringWriter) + if ok { + n, err = f.WriteString(s) + } else { + n, err = p.Writer.Write([]byte(s)) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the thruth is, we just don't know unless framed is used +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go new file mode 100644 index 0000000..442fa91 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/json_protocol.go @@ -0,0 +1,583 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "encoding/base64" + "fmt" +) + +const ( + THRIFT_JSON_PROTOCOL_VERSION = 1 +) + +// for references to _ParseContext see tsimplejson_protocol.go + +// JSON protocol implementation for thrift. +// +// This protocol produces/consumes a simple output format +// suitable for parsing by scripting languages. It should not be +// confused with the full-featured TJSONProtocol. 
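+//
+// A minimal sketch of emitting one call message with this protocol (error
+// handling elided; TMemoryBuffer is the in-memory transport defined later
+// in this package):
+//
+//	trans := NewTMemoryBuffer()
+//	proto := NewTJSONProtocol(trans)
+//	proto.WriteMessageBegin("ping", CALL, 1)
+//	proto.WriteStructBegin("ping_args")
+//	proto.WriteFieldStop()
+//	proto.WriteStructEnd()
+//	proto.WriteMessageEnd()
+//	proto.Flush()
+//	// trans.String() now holds [1,"ping",1,1,{}]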
+// +type TJSONProtocol struct { + *TSimpleJSONProtocol +} + +// Constructor +func NewTJSONProtocol(t TTransport) *TJSONProtocol { + v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} + v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) + v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) + return v +} + +// Factory +type TJSONProtocolFactory struct{} + +func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTJSONProtocol(trans) +} + +func NewTJSONProtocolFactory() *TJSONProtocolFactory { + return &TJSONProtocolFactory{} +} + +func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { + p.resetContextStack() // THRIFT-3735 + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil { + return e + } + if e := p.WriteString(name); e != nil { + return e + } + if e := p.WriteByte(int8(typeId)); e != nil { + return e + } + if e := p.WriteI32(seqId); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteMessageEnd() error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteStructBegin(name string) error { + if e := p.OutputObjectBegin(); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteStructEnd() error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + if e := p.WriteI16(id); e != nil { + return e + } + if e := p.OutputObjectBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(typeId) + if e1 != nil { + return e1 + } + if e := p.WriteString(s); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteFieldEnd() error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldStop() error { return nil } + +func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(keyType) + if e1 != nil { + return e1 + } + if e := p.WriteString(s); e != nil { + return e + } + s, e1 = p.TypeIdToString(valueType) + if e1 != nil { + return e1 + } + if e := p.WriteString(s); e != nil { + return e + } + if e := p.WriteI64(int64(size)); e != nil { + return e + } + return p.OutputObjectBegin() +} + +func (p *TJSONProtocol) WriteMapEnd() error { + if e := p.OutputObjectEnd(); e != nil { + return e + } + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteListEnd() error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteSetEnd() error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteBool(b bool) error { + if b { + return p.WriteI32(1) + } + return p.WriteI32(0) +} + +func (p *TJSONProtocol) WriteByte(b int8) error { + return p.WriteI32(int32(b)) +} + +func (p *TJSONProtocol) WriteI16(v int16) error { + return p.WriteI32(int32(v)) +} + +func (p *TJSONProtocol) WriteI32(v int32) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteI64(v int64) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteDouble(v float64) error { + return p.OutputF64(v) +} + +func (p *TJSONProtocol) WriteString(v string) error { + return p.OutputString(v) +} + +func 
(p *TJSONProtocol) WriteBinary(v []byte) error { + // JSON library only takes in a string, + // not an arbitrary byte array, to ensure bytes are transmitted + // efficiently we must convert this into a valid JSON string + // therefore we use base64 encoding to avoid excessive escaping/quoting + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + writer := base64.NewEncoder(base64.StdEncoding, p.writer) + if _, e := writer.Write(v); e != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + return NewTProtocolException(e) + } + if e := writer.Close(); e != nil { + return NewTProtocolException(e) + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +// Reading methods. +func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + version, err := p.ReadI32() + if err != nil { + return name, typeId, seqId, err + } + if version != THRIFT_JSON_PROTOCOL_VERSION { + e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) + return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) + + } + if name, err = p.ReadString(); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte() + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TJSONProtocol) ReadMessageEnd() error { + err := p.ParseListEnd() + return err +} + +func (p *TJSONProtocol) ReadStructBegin() (name string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TJSONProtocol) ReadStructEnd() error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { + b, _ := p.reader.Peek(1) + if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { + return "", STOP, -1, nil + } + fieldId, err := p.ReadI16() + if err != nil { + return "", STOP, fieldId, err + } + if _, err = p.ParseObjectStart(); err != nil { + return "", STOP, fieldId, err + } + sType, err := p.ReadString() + if err != nil { + return "", STOP, fieldId, err + } + fType, err := p.StringToTypeId(sType) + return "", fType, fieldId, err +} + +func (p *TJSONProtocol) ReadFieldEnd() error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + sKeyType, e := p.ReadString() + if e != nil { + return keyType, valueType, size, e + } + keyType, e = p.StringToTypeId(sKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + sValueType, e := p.ReadString() + if e != nil { + return keyType, valueType, size, e + } + valueType, e = p.StringToTypeId(sValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, e := p.ReadI64() + if e != nil { + return keyType, valueType, size, e + } + size = int(iSize) + + _, e = p.ParseObjectStart() + return keyType, valueType, size, e +} + +func (p *TJSONProtocol) ReadMapEnd() error { + e := p.ParseObjectEnd() + if e != nil { + return e + } 
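+ // A map is serialized as ["keyType","valueType",size,{...entries...}]
+ // (see WriteMapBegin above), so after the entry object is closed the
+ // enclosing list still needs to be terminated.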
+ return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadListEnd() error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadSetEnd() error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadBool() (bool, error) { + value, err := p.ReadI32() + return (value != 0), err +} + +func (p *TJSONProtocol) ReadByte() (int8, error) { + v, err := p.ReadI64() + return int8(v), err +} + +func (p *TJSONProtocol) ReadI16() (int16, error) { + v, err := p.ReadI64() + return int16(v), err +} + +func (p *TJSONProtocol) ReadI32() (int32, error) { + v, err := p.ReadI64() + return int32(v), err +} + +func (p *TJSONProtocol) ReadI64() (int64, error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TJSONProtocol) ReadDouble() (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TJSONProtocol) ReadString() (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) ReadBinary() ([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) Flush() (err error) { + err = p.writer.Flush() + if err == nil { + err = p.trans.Flush() + } + return NewTProtocolException(err) +} + +func (p *TJSONProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(elemType) + if e1 != nil { + return e1 + } + if e := p.WriteString(s); e != nil { + return e + } + if e := p.WriteI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) ParseElemListBegin() (elemType 
TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + sElemType, err := p.ReadString() + if err != nil { + return VOID, size, err + } + elemType, err = p.StringToTypeId(sElemType) + if err != nil { + return elemType, size, err + } + nSize, err2 := p.ReadI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + sElemType, err := p.ReadString() + if err != nil { + return VOID, size, err + } + elemType, err = p.StringToTypeId(sElemType) + if err != nil { + return elemType, size, err + } + nSize, err2 := p.ReadI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(elemType) + if e1 != nil { + return e1 + } + if e := p.OutputString(s); e != nil { + return e + } + if e := p.OutputI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { + switch byte(fieldType) { + case BOOL: + return "tf", nil + case BYTE: + return "i8", nil + case I16: + return "i16", nil + case I32: + return "i32", nil + case I64: + return "i64", nil + case DOUBLE: + return "dbl", nil + case STRING: + return "str", nil + case STRUCT: + return "rec", nil + case MAP: + return "map", nil + case SET: + return "set", nil + case LIST: + return "lst", nil + } + + e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) + return "", NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { + switch fieldType { + case "tf": + return TType(BOOL), nil + case "i8": + return TType(BYTE), nil + case "i16": + return TType(I16), nil + case "i32": + return TType(I32), nil + case "i64": + return TType(I64), nil + case "dbl": + return TType(DOUBLE), nil + case "str": + return TType(STRING), nil + case "rec": + return TType(STRUCT), nil + case "map": + return TType(MAP), nil + case "set": + return TType(SET), nil + case "lst": + return TType(LIST), nil + } + + e := fmt.Errorf("Unknown type identifier: %s", fieldType) + return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go new file mode 100644 index 0000000..97a4edf --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/memory_buffer.go @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bytes" +) + +// Memory buffer-based implementation of the TTransport interface. +type TMemoryBuffer struct { + *bytes.Buffer + size int +} + +type TMemoryBufferTransportFactory struct { + size int +} + +func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*TMemoryBuffer) + if ok && t.size > 0 { + return NewTMemoryBufferLen(t.size), nil + } + } + return NewTMemoryBufferLen(p.size), nil +} + +func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { + return &TMemoryBufferTransportFactory{size: size} +} + +func NewTMemoryBuffer() *TMemoryBuffer { + return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} +} + +func NewTMemoryBufferLen(size int) *TMemoryBuffer { + buf := make([]byte, 0, size) + return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} +} + +func (p *TMemoryBuffer) IsOpen() bool { + return true +} + +func (p *TMemoryBuffer) Open() error { + return nil +} + +func (p *TMemoryBuffer) Close() error { + p.Buffer.Reset() + return nil +} + +// Flushing a memory buffer is a no-op +func (p *TMemoryBuffer) Flush() error { + return nil +} + +func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { + return uint64(p.Buffer.Len()) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go b/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go new file mode 100644 index 0000000..25ab2e9 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/messagetype.go @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Message type constants in the Thrift protocol. +type TMessageType int32 + +const ( + INVALID_TMESSAGE_TYPE TMessageType = 0 + CALL TMessageType = 1 + REPLY TMessageType = 2 + EXCEPTION TMessageType = 3 + ONEWAY TMessageType = 4 +) diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go new file mode 100644 index 0000000..b7f4f8a --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol.go @@ -0,0 +1,139 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +/* +TMultiplexedProtocol is a protocol-independent concrete decorator +that allows a Thrift client to communicate with a multiplexing Thrift server, +by prepending the service name to the function name during function calls. + +NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request +from a multiplexing client. + +This example uses a single socket transport to invoke two services: + +socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) +transport := thrift.NewTFramedTransport(socket) +protocol := thrift.NewTBinaryProtocolTransport(transport) + +mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") +service := Calculator.NewCalculatorClient(mp) + +mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") +service2 := WeatherReport.NewWeatherReportClient(mp2) + +err := transport.Open() +if err != nil { + t.Fatal("Unable to open client socket", err) +} + +fmt.Println(service.Add(2,2)) +fmt.Println(service2.GetTemperature()) +*/ + +type TMultiplexedProtocol struct { + TProtocol + serviceName string +} + +const MULTIPLEXED_SEPARATOR = ":" + +func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { + return &TMultiplexedProtocol{ + TProtocol: protocol, + serviceName: serviceName, + } +} + +func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { + if typeId == CALL || typeId == ONEWAY { + return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) + } else { + return t.TProtocol.WriteMessageBegin(name, typeId, seqid) + } +} + +/* +TMultiplexedProcessor is a TProcessor allowing +a single TServer to provide multiple services. 
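+
+Dispatch relies on the composite message name written by TMultiplexedProtocol
+above ("ServiceName" + MULTIPLEXED_SEPARATOR + "methodName", e.g.
+"Calculator:Add"); Process splits that prefix back off to select the matching
+registered processor, falling back to DefaultProcessor when no separator is
+present.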
+ +To do so, you instantiate the processor and then register additional +processors with it, as shown in the following example: + +var processor = thrift.NewTMultiplexedProcessor() + +firstProcessor := +processor.RegisterProcessor("FirstService", firstProcessor) + +processor.registerProcessor( + "Calculator", + Calculator.NewCalculatorProcessor(&CalculatorHandler{}), +) + +processor.registerProcessor( + "WeatherReport", + WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), +) + +serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) +if err != nil { + t.Fatal("Unable to create server socket", err) +} +server := thrift.NewTSimpleServer2(processor, serverTransport) +server.Serve(); +*/ + +type TMultiplexedProcessor struct { + serviceProcessorMap map[string]TProcessor + DefaultProcessor TProcessor +} + +func NewTMultiplexedProcessor() *TMultiplexedProcessor { + return &TMultiplexedProcessor{ + serviceProcessorMap: make(map[string]TProcessor), + } +} + +func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { + t.DefaultProcessor = processor +} + +func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { + if t.serviceProcessorMap == nil { + t.serviceProcessorMap = make(map[string]TProcessor) + } + t.serviceProcessorMap[name] = processor +} + +//Protocol that use stored message for ReadMessageBegin +type storedMessageProtocol struct { + TProtocol + name string + typeId TMessageType + seqid int32 +} + +func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { + return &storedMessageProtocol{protocol, name, typeId, seqid} +} + +func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { + return s.name, s.typeId, s.seqid, nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go new file mode 100644 index 0000000..c71035e --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_go17.go @@ -0,0 +1,53 @@ +// +build go1.7 + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "fmt" + "strings" +) + +func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { + name, typeId, seqid, err := in.ReadMessageBegin() + if err != nil { + return false, err + } + if typeId != CALL && typeId != ONEWAY { + return false, fmt.Errorf("Unexpected message type %v", typeId) + } + //extract the service name + v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(v) != 2 { + if t.DefaultProcessor != nil { + smb := NewStoredMessageProtocol(in, name, typeId, seqid) + return t.DefaultProcessor.Process(ctx, smb, out) + } + return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name) + } + actualProcessor, ok := t.serviceProcessorMap[v[0]] + if !ok { + return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0]) + } + smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) + return actualProcessor.Process(ctx, smb, out) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go new file mode 100644 index 0000000..5c27b38 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/multiplexed_protocol_pre_go17.go @@ -0,0 +1,54 @@ +// +build !go1.7 + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "fmt" + "strings" + + "golang.org/x/net/context" +) + +func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { + name, typeId, seqid, err := in.ReadMessageBegin() + if err != nil { + return false, err + } + if typeId != CALL && typeId != ONEWAY { + return false, fmt.Errorf("Unexpected message type %v", typeId) + } + //extract the service name + v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if len(v) != 2 { + if t.DefaultProcessor != nil { + smb := NewStoredMessageProtocol(in, name, typeId, seqid) + return t.DefaultProcessor.Process(ctx, smb, out) + } + return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name) + } + actualProcessor, ok := t.serviceProcessorMap[v[0]] + if !ok { + return false, fmt.Errorf("Service name not found: %s. 
Did you forget to call registerProcessor()?", v[0]) + } + smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) + return actualProcessor.Process(ctx, smb, out) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go b/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go new file mode 100644 index 0000000..aa8daa9 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/numeric.go @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "math" + "strconv" +) + +type Numeric interface { + Int64() int64 + Int32() int32 + Int16() int16 + Byte() byte + Int() int + Float64() float64 + Float32() float32 + String() string + isNull() bool +} + +type numeric struct { + iValue int64 + dValue float64 + sValue string + isNil bool +} + +var ( + INFINITY Numeric + NEGATIVE_INFINITY Numeric + NAN Numeric + ZERO Numeric + NUMERIC_NULL Numeric +) + +func NewNumericFromDouble(dValue float64) Numeric { + if math.IsInf(dValue, 1) { + return INFINITY + } + if math.IsInf(dValue, -1) { + return NEGATIVE_INFINITY + } + if math.IsNaN(dValue) { + return NAN + } + iValue := int64(dValue) + sValue := strconv.FormatFloat(dValue, 'g', 10, 64) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI64(iValue int64) Numeric { + dValue := float64(iValue) + sValue := string(iValue) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI32(iValue int32) Numeric { + dValue := float64(iValue) + sValue := string(iValue) + isNil := false + return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromString(sValue string) Numeric { + if sValue == INFINITY.String() { + return INFINITY + } + if sValue == NEGATIVE_INFINITY.String() { + return NEGATIVE_INFINITY + } + if sValue == NAN.String() { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + isNil := len(sValue) == 0 + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromJSONString(sValue string, isNull bool) Numeric { + if isNull { + return NewNullNumeric() + } + if sValue == JSON_INFINITY { + return INFINITY + } + if sValue == JSON_NEGATIVE_INFINITY { + return NEGATIVE_INFINITY + } + if sValue == JSON_NAN { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} +} + +func NewNullNumeric() Numeric { + return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} +} + +func (p *numeric) Int64() int64 { + return p.iValue +} 
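+
+// Note: NewNumericFromI64/NewNumericFromI32 above build sValue with
+// string(iValue), which yields the UTF-8 character for that code point
+// (string(int64(65)) == "A"), not decimal digits; strconv.FormatInt(iValue, 10)
+// would produce "65". A short sketch of the intended round-trip via the
+// string constructor, which does parse decimals:
+//
+//	n := NewNumericFromString("42")
+//	_ = n.Int64()   // 42
+//	_ = n.Float64() // 42.0
+//	_ = n.String()  // "42"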
+ +func (p *numeric) Int32() int32 { + return int32(p.iValue) +} + +func (p *numeric) Int16() int16 { + return int16(p.iValue) +} + +func (p *numeric) Byte() byte { + return byte(p.iValue) +} + +func (p *numeric) Int() int { + return int(p.iValue) +} + +func (p *numeric) Float64() float64 { + return p.dValue +} + +func (p *numeric) Float32() float32 { + return float32(p.dValue) +} + +func (p *numeric) String() string { + return p.sValue +} + +func (p *numeric) isNull() bool { + return p.isNil +} + +func init() { + INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} + NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} + NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} + ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} + NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go b/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go new file mode 100644 index 0000000..8d6b2c2 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/pointerize.go @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +/////////////////////////////////////////////////////////////////////////////// +// This file is home to helpers that convert from various base types to +// respective pointer types. This is necessary because Go does not permit +// references to constants, nor can a pointer type to base type be allocated +// and initialized in a single expression. +// +// E.g., this is not allowed: +// +// var ip *int = &5 +// +// But this *is* allowed: +// +// func IntPtr(i int) *int { return &i } +// var ip *int = IntPtr(5) +// +// Since pointers to base types are commonplace as [optional] fields in +// exported thrift structs, we factor such helpers here. 
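+//
+// Editor's usage sketch (added in review; not part of the vendored file).
+// Given a hypothetical generated struct with an optional field,
+//
+//	type UserProfile struct {
+//		Nickname *string // optional in the IDL, hence a pointer
+//	}
+//
+// a caller can populate it in a single expression:
+//
+//	profile := UserProfile{Nickname: thrift.StringPtr("xena")}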
+/////////////////////////////////////////////////////////////////////////////// + +func Float32Ptr(v float32) *float32 { return &v } +func Float64Ptr(v float64) *float64 { return &v } +func IntPtr(v int) *int { return &v } +func Int32Ptr(v int32) *int32 { return &v } +func Int64Ptr(v int64) *int64 { return &v } +func StringPtr(v string) *string { return &v } +func Uint32Ptr(v uint32) *uint32 { return &v } +func Uint64Ptr(v uint64) *uint64 { return &v } +func BoolPtr(v bool) *bool { return &v } +func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go new file mode 100644 index 0000000..cb564b8 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/pre_go17.go @@ -0,0 +1,26 @@ +// +build !go1.7 + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "golang.org/x/net/context" + +var defaultCtx = context.Background() diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor.go new file mode 100644 index 0000000..566aaaf --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/processor.go @@ -0,0 +1,34 @@ +// +build !go1.7 + +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "golang.org/x/net/context" + +// A processor is a generic object which operates upon an input stream and +// writes to some output stream. 
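+//
+// Editor's sketch (added in review; not part of the vendored file): a
+// trivial implementation that drains one request without answering it;
+// real implementations come from generated Thrift code.
+//
+//	type discardProcessor struct{}
+//
+//	func (discardProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
+//		if _, _, _, err := in.ReadMessageBegin(); err != nil {
+//			return false, NewTProtocolException(err)
+//		}
+//		if err := in.Skip(STRUCT); err != nil {
+//			return false, NewTProtocolException(err)
+//		}
+//		if err := in.ReadMessageEnd(); err != nil {
+//			return false, NewTProtocolException(err)
+//		}
+//		return true, nil
+//	}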
+type TProcessor interface {
+	Process(ctx context.Context, in, out TProtocol) (bool, TException)
+}
+
+type TProcessorFunction interface {
+	Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go
new file mode 100644
index 0000000..9d645df
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/processor_factory.go
@@ -0,0 +1,58 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package thrift
+
+// The default processor factory just returns a singleton
+// instance.
+type TProcessorFactory interface {
+	GetProcessor(trans TTransport) TProcessor
+}
+
+type tProcessorFactory struct {
+	processor TProcessor
+}
+
+func NewTProcessorFactory(p TProcessor) TProcessorFactory {
+	return &tProcessorFactory{processor: p}
+}
+
+func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
+	return p.processor
+}
+
+// The default processor function factory likewise just returns a
+// singleton instance.
+type TProcessorFunctionFactory interface {
+	GetProcessorFunction(trans TTransport) TProcessorFunction
+}
+
+type tProcessorFunctionFactory struct {
+	processor TProcessorFunction
+}
+
+func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
+	return &tProcessorFunctionFactory{processor: p}
+}
+
+func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
+	return p.processor
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go b/vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go
new file mode 100644
index 0000000..fb0b165
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/processor_go17.go
@@ -0,0 +1,34 @@
+// +build go1.7
+
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package thrift + +import "context" + +// A processor is a generic object which operates upon an input stream and +// writes to some output stream. +type TProcessor interface { + Process(ctx context.Context, in, out TProtocol) (bool, TException) +} + +type TProcessorFunction interface { + Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go new file mode 100644 index 0000000..25e6d24 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol.go @@ -0,0 +1,178 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "fmt" +) + +const ( + VERSION_MASK = 0xffff0000 + VERSION_1 = 0x80010000 +) + +type TProtocol interface { + WriteMessageBegin(name string, typeId TMessageType, seqid int32) error + WriteMessageEnd() error + WriteStructBegin(name string) error + WriteStructEnd() error + WriteFieldBegin(name string, typeId TType, id int16) error + WriteFieldEnd() error + WriteFieldStop() error + WriteMapBegin(keyType TType, valueType TType, size int) error + WriteMapEnd() error + WriteListBegin(elemType TType, size int) error + WriteListEnd() error + WriteSetBegin(elemType TType, size int) error + WriteSetEnd() error + WriteBool(value bool) error + WriteByte(value int8) error + WriteI16(value int16) error + WriteI32(value int32) error + WriteI64(value int64) error + WriteDouble(value float64) error + WriteString(value string) error + WriteBinary(value []byte) error + + ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) + ReadMessageEnd() error + ReadStructBegin() (name string, err error) + ReadStructEnd() error + ReadFieldBegin() (name string, typeId TType, id int16, err error) + ReadFieldEnd() error + ReadMapBegin() (keyType TType, valueType TType, size int, err error) + ReadMapEnd() error + ReadListBegin() (elemType TType, size int, err error) + ReadListEnd() error + ReadSetBegin() (elemType TType, size int, err error) + ReadSetEnd() error + ReadBool() (value bool, err error) + ReadByte() (value int8, err error) + ReadI16() (value int16, err error) + ReadI32() (value int32, err error) + ReadI64() (value int64, err error) + ReadDouble() (value float64, err error) + ReadString() (value string, err error) + ReadBinary() (value []byte, err error) + + Skip(fieldType TType) (err error) + Flush() (err error) + + Transport() TTransport +} + +// The maximum recursive depth the skip() function will traverse +const DEFAULT_RECURSION_DEPTH = 64 + +// Skips over the next data element from the provided input TProtocol object. 
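+//
+// Editor's sketch (added in review; not part of the vendored file): the
+// typical call site is a field-decoding loop that tolerates unknown fields:
+//
+//	_, typeId, _, err := prot.ReadFieldBegin()
+//	if err != nil {
+//		return err
+//	}
+//	if typeId != STOP {
+//		if err := SkipDefaultDepth(prot, typeId); err != nil {
+//			return err
+//		}
+//	}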
+func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) {
+	return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH)
+}
+
+// Skips over the next data element from the provided input TProtocol object.
+func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) {
+
+	if maxDepth <= 0 {
+		return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded"))
+	}
+
+	switch fieldType {
+	case STOP:
+		return
+	case BOOL:
+		_, err = self.ReadBool()
+		return
+	case BYTE:
+		_, err = self.ReadByte()
+		return
+	case I16:
+		_, err = self.ReadI16()
+		return
+	case I32:
+		_, err = self.ReadI32()
+		return
+	case I64:
+		_, err = self.ReadI64()
+		return
+	case DOUBLE:
+		_, err = self.ReadDouble()
+		return
+	case STRING:
+		_, err = self.ReadString()
+		return
+	case STRUCT:
+		if _, err = self.ReadStructBegin(); err != nil {
+			return err
+		}
+		for {
+			// Propagate ReadFieldBegin errors; they were previously discarded.
+			_, typeId, _, err := self.ReadFieldBegin()
+			if err != nil {
+				return err
+			}
+			if typeId == STOP {
+				break
+			}
+			if err := Skip(self, typeId, maxDepth-1); err != nil {
+				return err
+			}
+			self.ReadFieldEnd()
+		}
+		return self.ReadStructEnd()
+	case MAP:
+		keyType, valueType, size, err := self.ReadMapBegin()
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			if err := Skip(self, keyType, maxDepth-1); err != nil {
+				return err
+			}
+			// Skip the value with the same depth accounting as the key; a
+			// bare self.Skip(valueType) here would ignore both the recursion
+			// limit and any error.
+			if err := Skip(self, valueType, maxDepth-1); err != nil {
+				return err
+			}
+		}
+		return self.ReadMapEnd()
+	case SET:
+		elemType, size, err := self.ReadSetBegin()
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadSetEnd()
+	case LIST:
+		elemType, size, err := self.ReadListBegin()
+		if err != nil {
+			return err
+		}
+		for i := 0; i < size; i++ {
+			err := Skip(self, elemType, maxDepth-1)
+			if err != nil {
+				return err
+			}
+		}
+		return self.ReadListEnd()
+	default:
+		return NewTProtocolExceptionWithType(INVALID_DATA, fmt.Errorf("Unknown data type %d", fieldType))
+	}
+}
diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go
new file mode 100644
index 0000000..29ab75d
--- /dev/null
+++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_exception.go
@@ -0,0 +1,77 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */ + +package thrift + +import ( + "encoding/base64" +) + +// Thrift Protocol exception +type TProtocolException interface { + TException + TypeId() int +} + +const ( + UNKNOWN_PROTOCOL_EXCEPTION = 0 + INVALID_DATA = 1 + NEGATIVE_SIZE = 2 + SIZE_LIMIT = 3 + BAD_VERSION = 4 + NOT_IMPLEMENTED = 5 + DEPTH_LIMIT = 6 +) + +type tProtocolException struct { + typeId int + message string +} + +func (p *tProtocolException) TypeId() int { + return p.typeId +} + +func (p *tProtocolException) String() string { + return p.message +} + +func (p *tProtocolException) Error() string { + return p.message +} + +func NewTProtocolException(err error) TProtocolException { + if err == nil { + return nil + } + if e, ok := err.(TProtocolException); ok { + return e + } + if _, ok := err.(base64.CorruptInputError); ok { + return &tProtocolException{INVALID_DATA, err.Error()} + } + return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()} +} + +func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { + if err == nil { + return nil + } + return &tProtocolException{errType, err.Error()} +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go new file mode 100644 index 0000000..c40f796 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/protocol_factory.go @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory interface for constructing protocol instances. +type TProtocolFactory interface { + GetProtocol(trans TTransport) TProtocol +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go new file mode 100644 index 0000000..4025beb --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/rich_transport.go @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import "io" + +type RichTransport struct { + TTransport +} + +// Wraps Transport to provide TRichTransport interface +func NewTRichTransport(trans TTransport) *RichTransport { + return &RichTransport{trans} +} + +func (r *RichTransport) ReadByte() (c byte, err error) { + return readByte(r.TTransport) +} + +func (r *RichTransport) WriteByte(c byte) error { + return writeByte(r.TTransport, c) +} + +func (r *RichTransport) WriteString(s string) (n int, err error) { + return r.Write([]byte(s)) +} + +func (r *RichTransport) RemainingBytes() (num_bytes uint64) { + return r.TTransport.RemainingBytes() +} + +func readByte(r io.Reader) (c byte, err error) { + v := [1]byte{0} + n, err := r.Read(v[0:1]) + if n > 0 && (err == nil || err == io.EOF) { + return v[0], nil + } + if n > 0 && err != nil { + return v[0], err + } + if err != nil { + return 0, err + } + return v[0], nil +} + +func writeByte(w io.Writer, c byte) error { + v := [1]byte{c} + _, err := w.Write(v[0:1]) + return err +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go b/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go new file mode 100644 index 0000000..7712229 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/serializer.go @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +type TSerializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +type TStruct interface { + Write(p TProtocol) error + Read(p TProtocol) error +} + +func NewTSerializer() *TSerializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) + + return &TSerializer{ + transport, + protocol} +} + +func (t *TSerializer) WriteString(msg TStruct) (s string, err error) { + t.Transport.Reset() + + if err = msg.Write(t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(); err != nil { + return + } + if err = t.Transport.Flush(); err != nil { + return + } + + return t.Transport.String(), nil +} + +func (t *TSerializer) Write(msg TStruct) (b []byte, err error) { + t.Transport.Reset() + + if err = msg.Write(t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(); err != nil { + return + } + + if err = t.Transport.Flush(); err != nil { + return + } + + b = append(b, t.Transport.Bytes()...) + return +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server.go b/vendor/github.com/apache/thrift/lib/go/thrift/server.go new file mode 100644 index 0000000..f813fa3 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/server.go @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +type TServer interface { + ProcessorFactory() TProcessorFactory + ServerTransport() TServerTransport + InputTransportFactory() TTransportFactory + OutputTransportFactory() TTransportFactory + InputProtocolFactory() TProtocolFactory + OutputProtocolFactory() TProtocolFactory + + // Starts the server + Serve() error + // Stops the server. This is optional on a per-implementation basis. Not + // all servers are required to be cleanly stoppable. + Stop() error +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go new file mode 100644 index 0000000..80313c4 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/server_socket.go @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "net" + "sync" + "time" +) + +type TServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + + // Protects the interrupted value to make it thread safe. 
+ mu sync.RWMutex + interrupted bool +} + +func NewTServerSocket(listenAddr string) (*TServerSocket, error) { + return NewTServerSocketTimeout(listenAddr, 0) +} + +func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil +} + +// Creates a TServerSocket from a net.Addr +func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { + return &TServerSocket{addr: addr, clientTimeout: clientTimeout} +} + +func (p *TServerSocket) Listen() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return nil + } + l, err := net.Listen(p.addr.Network(), p.addr.String()) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TServerSocket) Accept() (TTransport, error) { + p.mu.RLock() + interrupted := p.interrupted + p.mu.RUnlock() + + if interrupted { + return nil, errTransportInterrupted + } + + listener := p.listener + if listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + + conn, err := listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } + return NewTSocketFromConnTimeout(conn, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TServerSocket) Open() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TServerSocket) Addr() net.Addr { + if p.listener != nil { + return p.listener.Addr() + } + return p.addr +} + +func (p *TServerSocket) Close() error { + defer func() { + p.listener = nil + }() + if p.IsListening() { + return p.listener.Close() + } + return nil +} + +func (p *TServerSocket) Interrupt() error { + p.mu.Lock() + defer p.mu.Unlock() + p.interrupted = true + p.Close() + + return nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go new file mode 100644 index 0000000..51c40b6 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/server_transport.go @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Server transport. Object which provides client transports. 
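+//
+// Editor's sketch (added in review; not part of the vendored file): a bare
+// accept loop over a TServerTransport such as the TServerSocket above;
+// handleClient is a hypothetical per-connection handler.
+//
+//	if err := transport.Listen(); err != nil {
+//		return err
+//	}
+//	for {
+//		client, err := transport.Accept()
+//		if err != nil {
+//			return err
+//		}
+//		go handleClient(client)
+//	}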
+type TServerTransport interface { + Listen() error + Accept() (TTransport, error) + Close() error + + // Optional method implementation. This signals to the server transport + // that it should break out of any accept() or listen() that it is currently + // blocked on. This method, if implemented, MUST be thread safe, as it may + // be called from a different thread context than the other TServerTransport + // methods. + Interrupt() error +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go new file mode 100644 index 0000000..7353322 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/simple_json_protocol.go @@ -0,0 +1,1337 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "io" + "math" + "strconv" +) + +type _ParseContext int + +const ( + _CONTEXT_IN_TOPLEVEL _ParseContext = 1 + _CONTEXT_IN_LIST_FIRST _ParseContext = 2 + _CONTEXT_IN_LIST _ParseContext = 3 + _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4 + _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5 + _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6 +) + +func (p _ParseContext) String() string { + switch p { + case _CONTEXT_IN_TOPLEVEL: + return "TOPLEVEL" + case _CONTEXT_IN_LIST_FIRST: + return "LIST-FIRST" + case _CONTEXT_IN_LIST: + return "LIST" + case _CONTEXT_IN_OBJECT_FIRST: + return "OBJECT-FIRST" + case _CONTEXT_IN_OBJECT_NEXT_KEY: + return "OBJECT-NEXT-KEY" + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + return "OBJECT-NEXT-VALUE" + } + return "UNKNOWN-PARSE-CONTEXT" +} + +// JSON protocol implementation for thrift. +// +// This protocol produces/consumes a simple output format +// suitable for parsing by scripting languages. It should not be +// confused with the full-featured TJSONProtocol. 
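+//
+// Editor's sketch (added in review; not part of the vendored file): the
+// output format can be inspected by writing through a TMemoryBuffer:
+//
+//	buf := NewTMemoryBuffer()
+//	prot := NewTSimpleJSONProtocol(buf)
+//	prot.WriteBool(true)
+//	prot.Flush()
+//	buf.String() // "true"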
+// +type TSimpleJSONProtocol struct { + trans TTransport + + parseContextStack []int + dumpContext []int + + writer *bufio.Writer + reader *bufio.Reader +} + +// Constructor +func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { + v := &TSimpleJSONProtocol{trans: t, + writer: bufio.NewWriter(t), + reader: bufio.NewReader(t), + } + v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) + v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) + return v +} + +// Factory +type TSimpleJSONProtocolFactory struct{} + +func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTSimpleJSONProtocol(trans) +} + +func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { + return &TSimpleJSONProtocolFactory{} +} + +var ( + JSON_COMMA []byte + JSON_COLON []byte + JSON_LBRACE []byte + JSON_RBRACE []byte + JSON_LBRACKET []byte + JSON_RBRACKET []byte + JSON_QUOTE byte + JSON_QUOTE_BYTES []byte + JSON_NULL []byte + JSON_TRUE []byte + JSON_FALSE []byte + JSON_INFINITY string + JSON_NEGATIVE_INFINITY string + JSON_NAN string + JSON_INFINITY_BYTES []byte + JSON_NEGATIVE_INFINITY_BYTES []byte + JSON_NAN_BYTES []byte + json_nonbase_map_elem_bytes []byte +) + +func init() { + JSON_COMMA = []byte{','} + JSON_COLON = []byte{':'} + JSON_LBRACE = []byte{'{'} + JSON_RBRACE = []byte{'}'} + JSON_LBRACKET = []byte{'['} + JSON_RBRACKET = []byte{']'} + JSON_QUOTE = '"' + JSON_QUOTE_BYTES = []byte{'"'} + JSON_NULL = []byte{'n', 'u', 'l', 'l'} + JSON_TRUE = []byte{'t', 'r', 'u', 'e'} + JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} + JSON_INFINITY = "Infinity" + JSON_NEGATIVE_INFINITY = "-Infinity" + JSON_NAN = "NaN" + JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} + JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} + JSON_NAN_BYTES = []byte{'N', 'a', 'N'} + json_nonbase_map_elem_bytes = []byte{']', ',', '['} +} + +func jsonQuote(s string) string { + b, _ := json.Marshal(s) + s1 := string(b) + return s1 +} + +func jsonUnquote(s string) (string, bool) { + s1 := new(string) + err := json.Unmarshal([]byte(s), s1) + return *s1, err == nil +} + +func mismatch(expected, actual string) error { + return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) +} + +func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { + p.resetContextStack() // THRIFT-3735 + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteString(name); e != nil { + return e + } + if e := p.WriteByte(int8(typeId)); e != nil { + return e + } + if e := p.WriteI32(seqId); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteMessageEnd() error { + return p.OutputListEnd() +} + +func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error { + if e := p.OutputObjectBegin(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteStructEnd() error { + return p.OutputObjectEnd() +} + +func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + if e := p.WriteString(name); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) WriteFieldEnd() error { + //return p.OutputListEnd() + return nil +} + +func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil } + +func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + if e := 
p.WriteByte(int8(keyType)); e != nil {
+		return e
+	}
+	if e := p.WriteByte(int8(valueType)); e != nil {
+		return e
+	}
+	return p.WriteI32(int32(size))
+}
+
+func (p *TSimpleJSONProtocol) WriteMapEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteListEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error {
+	return p.OutputElemListBegin(elemType, size)
+}
+
+func (p *TSimpleJSONProtocol) WriteSetEnd() error {
+	return p.OutputListEnd()
+}
+
+func (p *TSimpleJSONProtocol) WriteBool(b bool) error {
+	return p.OutputBool(b)
+}
+
+func (p *TSimpleJSONProtocol) WriteByte(b int8) error {
+	return p.WriteI32(int32(b))
+}
+
+func (p *TSimpleJSONProtocol) WriteI16(v int16) error {
+	return p.WriteI32(int32(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI32(v int32) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteI64(v int64) error {
+	return p.OutputI64(int64(v))
+}
+
+func (p *TSimpleJSONProtocol) WriteDouble(v float64) error {
+	return p.OutputF64(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteString(v string) error {
+	return p.OutputString(v)
+}
+
+func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error {
+	// The JSON library only accepts strings, not arbitrary byte arrays. To
+	// transmit bytes efficiently inside a valid JSON string, we base64-encode
+	// them, which avoids excessive escaping and quoting.
+	if e := p.OutputPreValue(); e != nil {
+		return e
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
+	if _, e := writer.Write(v); e != nil {
+		p.writer.Reset(p.trans) // THRIFT-3735
+		return NewTProtocolException(e)
+	}
+	if e := writer.Close(); e != nil {
+		return NewTProtocolException(e)
+	}
+	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
+		return NewTProtocolException(e)
+	}
+	return p.OutputPostValue()
+}
+
+// Reading methods.
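+//
+// Editor's note (added in review; not part of the vendored file): binary
+// values round-trip through base64, e.g. WriteBinary([]byte{0xde, 0xad,
+// 0xbe, 0xef}) emits the quoted string "3q2+7w==", which ReadBinary below
+// decodes again via ParseBase64EncodedBody.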
+func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + if name, err = p.ReadString(); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte() + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TSimpleJSONProtocol) ReadMessageEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TSimpleJSONProtocol) ReadStructEnd() error { + return p.ParseObjectEnd() +} + +func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { + if err := p.ParsePreValue(); err != nil { + return "", STOP, 0, err + } + b, _ := p.reader.Peek(1) + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return "", STOP, 0, nil + case JSON_QUOTE: + p.reader.ReadByte() + name, err := p.ParseStringBody() + // simplejson is not meant to be read back into thrift + // - see http://wiki.apache.org/thrift/ThriftUsageJava + // - use JSON instead + if err != nil { + return name, STOP, 0, err + } + return name, STOP, -1, p.ParsePostValue() + /* + if err = p.ParsePostValue(); err != nil { + return name, STOP, 0, err + } + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, STOP, 0, err + } + bType, err := p.ReadByte() + thetype := TType(bType) + if err != nil { + return name, thetype, 0, err + } + id, err := p.ReadI16() + return name, thetype, id, err + */ + } + e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) + return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return "", STOP, 0, NewTProtocolException(io.EOF) +} + +func (p *TSimpleJSONProtocol) ReadFieldEnd() error { + return nil + //return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + bKeyType, e := p.ReadByte() + keyType = TType(bKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + bValueType, e := p.ReadByte() + valueType = TType(bValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, err := p.ReadI64() + size = int(iSize) + return keyType, valueType, size, err +} + +func (p *TSimpleJSONProtocol) ReadMapEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadListEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadSetEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { + var value bool + + if err := p.ParsePreValue(); err != nil { + return value, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 { + switch f[0] { + case JSON_TRUE[0]: + b := make([]byte, len(JSON_TRUE)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == 
string(JSON_TRUE) { + value = true + } else { + e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + break + case JSON_FALSE[0]: + b := make([]byte, len(JSON_FALSE)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_FALSE) { + value = false + } else { + e := fmt.Errorf("Expected \"false\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + break + case JSON_NULL[0]: + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_NULL) { + value = false + } else { + e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + default: + e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + return value, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadByte() (int8, error) { + v, err := p.ReadI64() + return int8(v), err +} + +func (p *TSimpleJSONProtocol) ReadI16() (int16, error) { + v, err := p.ReadI64() + return int16(v), err +} + +func (p *TSimpleJSONProtocol) ReadI32() (int32, error) { + v, err := p.ReadI64() + return int32(v), err +} + +func (p *TSimpleJSONProtocol) ReadI64() (int64, error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadString() (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) Flush() (err error) { + return NewTProtocolException(p.writer.Flush()) +} + +func (p 
*TSimpleJSONProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TSimpleJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TSimpleJSONProtocol) OutputPreValue() error { + cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) + switch cxt { + case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: + if _, e := p.write(JSON_COMMA); e != nil { + return NewTProtocolException(e) + } + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if _, e := p.write(JSON_COLON); e != nil { + return NewTProtocolException(e) + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputPostValue() error { + cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST)) + break + case _CONTEXT_IN_OBJECT_FIRST: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) + break + case _CONTEXT_IN_OBJECT_NEXT_KEY: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) + break + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputBool(value bool) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if value { + v = string(JSON_TRUE) + } else { + v = string(JSON_FALSE) + } + switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + default: + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputNull() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_NULL); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputF64(value float64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if math.IsNaN(value) { + v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) + } else if math.IsInf(value, 1) { + v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) + } else if math.IsInf(value, -1) { + v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) + } else { + v = strconv.FormatFloat(value, 'g', -1, 64) + switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = string(JSON_QUOTE) + v + string(JSON_QUOTE) + default: + } + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputI64(value int64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + v := strconv.FormatInt(value, 10) + switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + default: + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputString(s string) error { + if e := p.OutputPreValue(); e != nil { + return e + } + if e := p.OutputStringData(jsonQuote(s)); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) 
OutputStringData(s string) error { + _, e := p.write([]byte(s)) + return NewTProtocolException(e) +} + +func (p *TSimpleJSONProtocol) OutputObjectBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACE); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST)) + return nil +} + +func (p *TSimpleJSONProtocol) OutputObjectEnd() error { + if _, e := p.write(JSON_RBRACE); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputListBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACKET); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST)) + return nil +} + +func (p *TSimpleJSONProtocol) OutputListEnd() error { + if _, e := p.write(JSON_RBRACKET); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteByte(int8(elemType)); e != nil { + return e + } + if e := p.WriteI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePreValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + b, _ := p.reader.Peek(1) + switch cxt { + case _CONTEXT_IN_LIST: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACKET[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + break + case _CONTEXT_IN_OBJECT_NEXT_KEY: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if len(b) > 0 { + switch b[0] { + case JSON_COLON[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePostValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST)) + break + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + p.parseContextStack = 
p.parseContextStack[:len(p.parseContextStack)-1]
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE))
+		break
+	case _CONTEXT_IN_OBJECT_NEXT_VALUE:
+		p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+		p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY))
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error {
+	for {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return nil
+		}
+		switch b[0] {
+		case ' ', '\r', '\n', '\t':
+			p.reader.ReadByte()
+			continue
+		default:
+			break
+		}
+		break
+	}
+	return nil
+}
+
+func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		v, ok := jsonUnquote(string(JSON_QUOTE) + line)
+		if !ok {
+			// err is always nil on this path, so wrapping it produced a nil
+			// exception; report a real parse error instead.
+			e := fmt.Errorf("Unable to parse as JSON string %s", string(JSON_QUOTE)+line)
+			return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+		return v, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	str := string(JSON_QUOTE) + line + s
+	v, ok := jsonUnquote(str)
+	if !ok {
+		e := fmt.Errorf("Unable to parse as JSON string %s", str)
+		return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) {
+	line, err := p.reader.ReadString(JSON_QUOTE)
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	l := len(line)
+	// count number of escapes to see if we need to keep going
+	i := 1
+	for ; i < l; i++ {
+		if line[l-i-1] != '\\' {
+			break
+		}
+	}
+	if i&0x01 == 1 {
+		return line, nil
+	}
+	s, err := p.ParseQuotedStringBody()
+	if err != nil {
+		return "", NewTProtocolException(err)
+	}
+	v := line + s
+	return v, nil
+}
+
+func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) {
+	line, err := p.reader.ReadBytes(JSON_QUOTE)
+	if err != nil {
+		return line, NewTProtocolException(err)
+	}
+	line2 := line[0 : len(line)-1]
+	l := len(line2)
+	if (l % 4) != 0 {
+		pad := 4 - (l % 4)
+		fill := [...]byte{'=', '=', '='}
+		line2 = append(line2, fill[:pad]...)
+ l = len(line2) + } + output := make([]byte, base64.StdEncoding.DecodedLen(l)) + n, err := base64.StdEncoding.Decode(output, line2) + return output[0:n], NewTProtocolException(err) +} + +func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value int64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Int64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value float64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Float64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { + if err := p.ParsePreValue(); err != nil { + return false, err + } + var b []byte + b, err := p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) > 0 && b[0] == JSON_LBRACE[0] { + p.reader.ReadByte() + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST)) + return false, nil + } else if p.safePeekContains(JSON_NULL) { + return true, nil + } + e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) + return false, NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TSimpleJSONProtocol) ParseObjectEnd() error { + if isNull, err := p.readIfNull(); isNull || err != nil { + return err + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { + e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + line, err := p.reader.ReadString(JSON_RBRACE[0]) + if err != nil { + return NewTProtocolException(err) + } + for _, char := range line { + switch char { + default: + e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + case ' ', '\n', '\r', '\t', '}': + break + } + } + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + return p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { + if e := p.ParsePreValue(); e != nil { + return false, e + } + var b []byte + b, err = p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST)) + p.reader.ReadByte() + isNull = false + } else if p.safePeekContains(JSON_NULL) { + isNull = true + } else { + err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) + } + return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) +} + +func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + bElemType, err := p.ReadByte() + elemType = TType(bElemType) + if err != nil { + return elemType, size, err + } + nSize, err2 := p.ReadI64() 
+	size = int(nSize)
+	return elemType, size, err2
+}
+
+func (p *TSimpleJSONProtocol) ParseListEnd() error {
+	if isNull, err := p.readIfNull(); isNull || err != nil {
+		return err
+	}
+	cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1])
+	if cxt != _CONTEXT_IN_LIST {
+		e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt)
+		return NewTProtocolExceptionWithType(INVALID_DATA, e)
+	}
+	line, err := p.reader.ReadString(JSON_RBRACKET[0])
+	if err != nil {
+		return NewTProtocolException(err)
+	}
+	for _, char := range line {
+		switch char {
+		default:
+			e := fmt.Errorf("Expecting end of list \"]\", but found: \"%s\"", line)
+			return NewTProtocolExceptionWithType(INVALID_DATA, e)
+		case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]):
+			break
+		}
+	}
+	p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1]
+	if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL {
+		return nil
+	}
+	return p.ParsePostValue()
+}
+
+func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) {
+	e := p.readNonSignificantWhitespace()
+	if e != nil {
+		return nil, VOID, NewTProtocolException(e)
+	}
+	b, e := p.reader.Peek(1)
+	if len(b) > 0 {
+		c := b[0]
+		switch c {
+		case JSON_NULL[0]:
+			buf := make([]byte, len(JSON_NULL))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return nil, VOID, NewTProtocolException(e)
+			}
+			if string(JSON_NULL) != string(buf) {
+				e = mismatch(string(JSON_NULL), string(buf))
+				return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return nil, VOID, nil
+		case JSON_QUOTE:
+			p.reader.ReadByte()
+			v, e := p.ParseStringBody()
+			if e != nil {
+				return v, UTF8, NewTProtocolException(e)
+			}
+			if v == JSON_INFINITY {
+				return INFINITY, DOUBLE, nil
+			} else if v == JSON_NEGATIVE_INFINITY {
+				return NEGATIVE_INFINITY, DOUBLE, nil
+			} else if v == JSON_NAN {
+				return NAN, DOUBLE, nil
+			}
+			return v, UTF8, nil
+		case JSON_TRUE[0]:
+			buf := make([]byte, len(JSON_TRUE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return true, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_TRUE) != string(buf) {
+				e := mismatch(string(JSON_TRUE), string(buf))
+				return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return true, BOOL, nil
+		case JSON_FALSE[0]:
+			buf := make([]byte, len(JSON_FALSE))
+			_, e := p.reader.Read(buf)
+			if e != nil {
+				return false, BOOL, NewTProtocolException(e)
+			}
+			if string(JSON_FALSE) != string(buf) {
+				e := mismatch(string(JSON_FALSE), string(buf))
+				return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e)
+			}
+			return false, BOOL, nil
+		case JSON_LBRACKET[0]:
+			_, e := p.reader.ReadByte()
+			return make([]interface{}, 0), LIST, NewTProtocolException(e)
+		case JSON_LBRACE[0]:
+			_, e := p.reader.ReadByte()
+			return make(map[string]interface{}), STRUCT, NewTProtocolException(e)
+		case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]:
+			// assume numeric
+			v, e := p.readNumeric()
+			return v, DOUBLE, e
+		default:
+			e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c))
+			return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+		}
+	}
+	e = fmt.Errorf("Cannot read a single element while parsing JSON.")
+	return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e)
+
+}
+
+func (p *TSimpleJSONProtocol) readIfNull() (bool, error) {
+	cont := true
+	for cont {
+		b, _ := p.reader.Peek(1)
+		if len(b) < 1 {
+			return false, nil
+
} + switch b[0] { + default: + return false, nil + case JSON_NULL[0]: + cont = false + break + case ' ', '\n', '\r', '\t': + p.reader.ReadByte() + break + } + } + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + return true, nil + } + return false, nil +} + +func (p *TSimpleJSONProtocol) readQuoteIfNext() { + b, _ := p.reader.Peek(1) + if len(b) > 0 && b[0] == JSON_QUOTE { + p.reader.ReadByte() + } +} + +func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { + isNull, err := p.readIfNull() + if isNull || err != nil { + return NUMERIC_NULL, err + } + hasDecimalPoint := false + nextCanBeSign := true + hasE := false + MAX_LEN := 40 + buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) + continueFor := true + inQuotes := false + for continueFor { + c, err := p.reader.ReadByte() + if err != nil { + if err == io.EOF { + break + } + return NUMERIC_NULL, NewTProtocolException(err) + } + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + buf.WriteByte(c) + nextCanBeSign = false + case '.': + if hasDecimalPoint { + e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if hasE { + e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasDecimalPoint, nextCanBeSign = true, false + case 'e', 'E': + if hasE { + e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasE, nextCanBeSign = true, true + case '-', '+': + if !nextCanBeSign { + e := fmt.Errorf("Negative sign within number") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + nextCanBeSign = false + case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: + p.reader.UnreadByte() + continueFor = false + case JSON_NAN[0]: + if buf.Len() == 0 { + buffer := make([]byte, len(JSON_NAN)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_NAN != string(buffer) { + e := mismatch(JSON_NAN, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NAN, nil + } else { + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_INFINITY[0]: + if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { + buffer := make([]byte, len(JSON_INFINITY)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_INFINITY != string(buffer) { + e := mismatch(JSON_INFINITY, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return INFINITY, nil + } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { + buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) + buffer[0] = JSON_NEGATIVE_INFINITY[0] + buffer[1] = c + _, e := p.reader.Read(buffer[2:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_NEGATIVE_INFINITY != string(buffer) { + e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) + return 
NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NEGATIVE_INFINITY, nil + } else { + e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_QUOTE: + if !inQuotes { + inQuotes = true + } else { + break + } + default: + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + if buf.Len() == 0 { + e := fmt.Errorf("Unable to parse number from empty string ''") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return NewNumericFromJSONString(buf.String(), false), nil +} + +// Safely peeks into the buffer, reading only what is necessary +func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { + for i := 0; i < len(b); i++ { + a, _ := p.reader.Peek(i + 1) + if len(a) == 0 || a[i] != b[i] { + return false + } + } + return true +} + +// Reset the context stack to its initial state. +func (p *TSimpleJSONProtocol) resetContextStack() { + p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)} + p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)} +} + +func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { + n, err := p.writer.Write(b) + if err != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + } + return n, err +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go b/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go new file mode 100644 index 0000000..37081bd --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/simple_server.go @@ -0,0 +1,215 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "log" + "runtime/debug" + "sync" + "sync/atomic" +) + +/* + * This is not a typical TSimpleServer as it is not blocked after accept a socket. + * It is more like a TThreadedServer that can handle different connections in different goroutines. + * This will work if golang user implements a conn-pool like thing in client side. 
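+ *
+ * A minimal usage sketch (the listen address and the processor value are
+ * illustrative, not part of this file):
+ *
+ *   transport, err := thrift.NewTServerSocket(":9090")
+ *   if err != nil {
+ *       // handle the error
+ *   }
+ *   server := thrift.NewTSimpleServer4(processor, transport,
+ *       thrift.NewTTransportFactory(),
+ *       thrift.NewTBinaryProtocolFactoryDefault())
+ *   err = server.Serve() // Listen + AcceptLoop; returns after Stop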
+ */
+type TSimpleServer struct {
+	closed int32
+	wg     sync.WaitGroup
+	mu     sync.Mutex
+
+	processorFactory       TProcessorFactory
+	serverTransport        TServerTransport
+	inputTransportFactory  TTransportFactory
+	outputTransportFactory TTransportFactory
+	inputProtocolFactory   TProtocolFactory
+	outputProtocolFactory  TProtocolFactory
+}
+
+func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport)
+}
+
+func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory4(NewTProcessorFactory(processor),
+		serverTransport,
+		transportFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(NewTProcessorFactory(processor),
+		serverTransport,
+		inputTransportFactory,
+		outputTransportFactory,
+		inputProtocolFactory,
+		outputProtocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		NewTTransportFactory(),
+		NewTTransportFactory(),
+		NewTBinaryProtocolFactoryDefault(),
+		NewTBinaryProtocolFactoryDefault(),
+	)
+}
+
+func NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer {
+	return NewTSimpleServerFactory6(processorFactory,
+		serverTransport,
+		transportFactory,
+		transportFactory,
+		protocolFactory,
+		protocolFactory,
+	)
+}
+
+func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer {
+	return &TSimpleServer{
+		processorFactory:       processorFactory,
+		serverTransport:        serverTransport,
+		inputTransportFactory:  inputTransportFactory,
+		outputTransportFactory: outputTransportFactory,
+		inputProtocolFactory:   inputProtocolFactory,
+		outputProtocolFactory:  outputProtocolFactory,
+	}
+}
+
+func (p *TSimpleServer) ProcessorFactory() TProcessorFactory {
+	return p.processorFactory
+}
+
+func (p *TSimpleServer) ServerTransport() TServerTransport {
+	return p.serverTransport
+}
+
+func (p *TSimpleServer) InputTransportFactory() TTransportFactory {
+	return p.inputTransportFactory
+}
+
+func (p *TSimpleServer) OutputTransportFactory() TTransportFactory {
+	return p.outputTransportFactory
+}
+
+func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory {
+	return p.inputProtocolFactory
+}
+
+func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory {
+	return p.outputProtocolFactory
+}
+
+func (p *TSimpleServer) Listen() error {
+	return p.serverTransport.Listen()
+}
+
+func (p *TSimpleServer) AcceptLoop() error {
+	for {
+		client, err := p.serverTransport.Accept()
+		p.mu.Lock()
+		if atomic.LoadInt32(&p.closed) != 0 {
+			p.mu.Unlock() // mu must be released on every exit path; Stop also takes it
+			return nil
+		}
+		if err != nil {
+			p.mu.Unlock()
+			return err
+		}
+		if client != nil {
+			p.wg.Add(1)
+			go func() {
+				defer p.wg.Done()
+				if err := p.processRequests(client); err != nil {
+
log.Println("error processing request:", err) + } + }() + } + p.mu.Unlock() + } +} + +func (p *TSimpleServer) Serve() error { + err := p.Listen() + if err != nil { + return err + } + p.AcceptLoop() + return nil +} + +func (p *TSimpleServer) Stop() error { + p.mu.Lock() + defer p.mu.Unlock() + if atomic.LoadInt32(&p.closed) != 0 { + return nil + } + atomic.StoreInt32(&p.closed, 1) + p.serverTransport.Interrupt() + p.wg.Wait() + return nil +} + +func (p *TSimpleServer) processRequests(client TTransport) error { + processor := p.processorFactory.GetProcessor(client) + inputTransport, err := p.inputTransportFactory.GetTransport(client) + if err != nil { + return err + } + outputTransport, err := p.outputTransportFactory.GetTransport(client) + if err != nil { + return err + } + inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) + outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport) + defer func() { + if e := recover(); e != nil { + log.Printf("panic in processor: %s: %s", e, debug.Stack()) + } + }() + + if inputTransport != nil { + defer inputTransport.Close() + } + if outputTransport != nil { + defer outputTransport.Close() + } + for { + if atomic.LoadInt32(&p.closed) != 0 { + return nil + } + + ok, err := processor.Process(defaultCtx, inputProtocol, outputProtocol) + if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE { + return nil + } else if err != nil { + return err + } + if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD { + continue + } + if !ok { + break + } + } + return nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/socket.go new file mode 100644 index 0000000..383b1fe --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/socket.go @@ -0,0 +1,165 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "net" + "time" +) + +type TSocket struct { + conn net.Conn + addr net.Addr + timeout time.Duration +} + +// NewTSocket creates a net.Conn-backed TTransport, given a host and port +// +// Example: +// trans, err := thrift.NewTSocket("localhost:9090") +func NewTSocket(hostPort string) (*TSocket, error) { + return NewTSocketTimeout(hostPort, 0) +} + +// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port +// it also accepts a timeout as a time.Duration +func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) { + //conn, err := net.DialTimeout(network, address, timeout) + addr, err := net.ResolveTCPAddr("tcp", hostPort) + if err != nil { + return nil, err + } + return NewTSocketFromAddrTimeout(addr, timeout), nil +} + +// Creates a TSocket from a net.Addr +func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket { + return &TSocket{addr: addr, timeout: timeout} +} + +// Creates a TSocket from an existing net.Conn +func NewTSocketFromConnTimeout(conn net.Conn, timeout time.Duration) *TSocket { + return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout} +} + +// Sets the socket timeout +func (p *TSocket) SetTimeout(timeout time.Duration) error { + p.timeout = timeout + return nil +} + +func (p *TSocket) pushDeadline(read, write bool) { + var t time.Time + if p.timeout > 0 { + t = time.Now().Add(time.Duration(p.timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TSocket) Open() error { + if p.IsOpen() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + var err error + if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSocket) IsOpen() bool { + if p.conn == nil { + return false + } + return true +} + +// Closes the socket. +func (p *TSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +//Returns the remote address of the socket. 
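+// The returned address is the one the socket was constructed with; Open
+// dials it but never replaces it.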
+func (p *TSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSocket) Read(buf []byte) (int, error) { + if !p.IsOpen() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSocket) Write(buf []byte) (int, error) { + if !p.IsOpen() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSocket) Flush() error { + return nil +} + +func (p *TSocket) Interrupt() error { + if !p.IsOpen() { + return nil + } + return p.conn.Close() +} + +func (p *TSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the thruth is, we just don't know unless framed is used +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go new file mode 100644 index 0000000..907afca --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_server_socket.go @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "crypto/tls" + "net" + "time" +) + +type TSSLServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + interrupted bool + cfg *tls.Config +} + +func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { + return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) +} + +func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { + if cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil +} + +func (p *TSSLServerSocket) Listen() error { + if p.IsListening() { + return nil + } + l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TSSLServerSocket) Accept() (TTransport, error) { + if p.interrupted { + return nil, errTransportInterrupted + } + if p.listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + conn, err := p.listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } + return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TSSLServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. 
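+// For a server socket this is equivalent to Listen, except that it fails
+// with ALREADY_OPEN when a listener already exists.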
+func (p *TSSLServerSocket) Open() error { + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TSSLServerSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSSLServerSocket) Close() error { + defer func() { + p.listener = nil + }() + if p.IsListening() { + return p.listener.Close() + } + return nil +} + +func (p *TSSLServerSocket) Interrupt() error { + p.interrupted = true + return nil +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go new file mode 100644 index 0000000..8272703 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/ssl_socket.go @@ -0,0 +1,173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "crypto/tls" + "net" + "time" +) + +type TSSLSocket struct { + conn net.Conn + // hostPort contains host:port (e.g. "asdf.com:12345"). The field is + // only valid if addr is nil. + hostPort string + // addr is nil when hostPort is not "", and is only used when the + // TSSLSocket is constructed from a net.Addr. 
+ addr net.Addr + timeout time.Duration + cfg *tls.Config +} + +// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration +// +// Example: +// trans, err := thrift.NewTSSLSocket("localhost:9090", nil) +func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { + return NewTSSLSocketTimeout(hostPort, cfg, 0) +} + +// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port +// it also accepts a tls Configuration and a timeout as a time.Duration +func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) { + if cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil +} + +// Creates a TSSLSocket from a net.Addr +func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket { + return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg} +} + +// Creates a TSSLSocket from an existing net.Conn +func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket { + return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg} +} + +// Sets the socket timeout +func (p *TSSLSocket) SetTimeout(timeout time.Duration) error { + p.timeout = timeout + return nil +} + +func (p *TSSLSocket) pushDeadline(read, write bool) { + var t time.Time + if p.timeout > 0 { + t = time.Now().Add(time.Duration(p.timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TSSLSocket) Open() error { + var err error + // If we have a hostname, we need to pass the hostname to tls.Dial for + // certificate hostname checks. + if p.hostPort != "" { + if p.conn, err = tls.Dial("tcp", p.hostPort, p.cfg); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } else { + if p.IsOpen() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + if p.conn, err = tls.Dial(p.addr.Network(), p.addr.String(), p.cfg); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSSLSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSSLSocket) IsOpen() bool { + if p.conn == nil { + return false + } + return true +} + +// Closes the socket. 
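+// Close is safe to call repeatedly: the conn is cleared on success, so a
+// second call is a no-op and IsOpen reports false.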
+func (p *TSSLSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +func (p *TSSLSocket) Read(buf []byte) (int, error) { + if !p.IsOpen() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSSLSocket) Write(buf []byte) (int, error) { + if !p.IsOpen() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSSLSocket) Flush() error { + return nil +} + +func (p *TSSLSocket) Interrupt() error { + if !p.IsOpen() { + return nil + } + return p.conn.Close() +} + +func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the thruth is, we just don't know unless framed is used +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport.go new file mode 100644 index 0000000..70a85a8 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport.go @@ -0,0 +1,65 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +var errTransportInterrupted = errors.New("Transport Interrupted") + +type Flusher interface { + Flush() (err error) +} + +type ReadSizeProvider interface { + RemainingBytes() (num_bytes uint64) +} + +// Encapsulates the I/O layer +type TTransport interface { + io.ReadWriteCloser + Flusher + ReadSizeProvider + + // Opens the transport for communication + Open() error + + // Returns true if the transport is open + IsOpen() bool +} + +type stringWriter interface { + WriteString(s string) (n int, err error) +} + +// This is "enchanced" transport with extra capabilities. You need to use one of these +// to construct protocol. +// Notably, TSocket does not implement this interface, and it is always a mistake to use +// TSocket directly in protocol. +type TRichTransport interface { + io.ReadWriter + io.ByteReader + io.ByteWriter + stringWriter + Flusher + ReadSizeProvider +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go new file mode 100644 index 0000000..9505b44 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport_exception.go @@ -0,0 +1,90 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +type timeoutable interface { + Timeout() bool +} + +// Thrift Transport exception +type TTransportException interface { + TException + TypeId() int + Err() error +} + +const ( + UNKNOWN_TRANSPORT_EXCEPTION = 0 + NOT_OPEN = 1 + ALREADY_OPEN = 2 + TIMED_OUT = 3 + END_OF_FILE = 4 +) + +type tTransportException struct { + typeId int + err error +} + +func (p *tTransportException) TypeId() int { + return p.typeId +} + +func (p *tTransportException) Error() string { + return p.err.Error() +} + +func (p *tTransportException) Err() error { + return p.err +} + +func NewTTransportException(t int, e string) TTransportException { + return &tTransportException{typeId: t, err: errors.New(e)} +} + +func NewTTransportExceptionFromError(e error) TTransportException { + if e == nil { + return nil + } + + if t, ok := e.(TTransportException); ok { + return t + } + + switch v := e.(type) { + case TTransportException: + return v + case timeoutable: + if v.Timeout() { + return &tTransportException{typeId: TIMED_OUT, err: e} + } + } + + if e == io.EOF { + return &tTransportException{typeId: END_OF_FILE, err: e} + } + + return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e} +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go b/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go new file mode 100644 index 0000000..c805807 --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/transport_factory.go @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory class used to create wrapped instance of Transports. +// This is used primarily in servers, which get Transports from +// a ServerTransport and then may want to mutate them (i.e. create +// a BufferedTransport from the underlying base transport) +type TTransportFactory interface { + GetTransport(trans TTransport) (TTransport, error) +} + +type tTransportFactory struct{} + +// Return a wrapped instance of the base Transport. 
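+// The default factory is a passthrough; a server that wants buffered or
+// framed transports installs a factory that wraps the base transport
+// instead.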
+func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return trans, nil +} + +func NewTTransportFactory() TTransportFactory { + return &tTransportFactory{} +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/type.go b/vendor/github.com/apache/thrift/lib/go/thrift/type.go new file mode 100644 index 0000000..4292ffc --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/type.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Type constants in the Thrift protocol +type TType byte + +const ( + STOP = 0 + VOID = 1 + BOOL = 2 + BYTE = 3 + I08 = 3 + DOUBLE = 4 + I16 = 6 + I32 = 8 + I64 = 10 + STRING = 11 + UTF7 = 11 + STRUCT = 12 + MAP = 13 + SET = 14 + LIST = 15 + UTF8 = 16 + UTF16 = 17 + //BINARY = 18 wrong and unusued +) + +var typeNames = map[int]string{ + STOP: "STOP", + VOID: "VOID", + BOOL: "BOOL", + BYTE: "BYTE", + DOUBLE: "DOUBLE", + I16: "I16", + I32: "I32", + I64: "I64", + STRING: "STRING", + STRUCT: "STRUCT", + MAP: "MAP", + SET: "SET", + LIST: "LIST", + UTF8: "UTF8", + UTF16: "UTF16", +} + +func (p TType) String() string { + if s, ok := typeNames[int(p)]; ok { + return s + } + return "Unknown" +} diff --git a/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go new file mode 100644 index 0000000..6f477ca --- /dev/null +++ b/vendor/github.com/apache/thrift/lib/go/thrift/zlib_transport.go @@ -0,0 +1,116 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. + */ + +package thrift + +import ( + "compress/zlib" + "io" + "log" +) + +// TZlibTransportFactory is a factory for TZlibTransport instances +type TZlibTransportFactory struct { + level int +} + +// TZlibTransport is a TTransport implementation that makes use of zlib compression. 
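+//
+// Example (a sketch; base can be any TTransport):
+//
+//   trans, err := thrift.NewTZlibTransport(base, zlib.BestCompression)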
+type TZlibTransport struct { + reader io.ReadCloser + transport TTransport + writer *zlib.Writer +} + +// GetTransport constructs a new instance of NewTZlibTransport +func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return NewTZlibTransport(trans, p.level) +} + +// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory +func NewTZlibTransportFactory(level int) *TZlibTransportFactory { + return &TZlibTransportFactory{level: level} +} + +// NewTZlibTransport constructs a new instance of TZlibTransport +func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { + w, err := zlib.NewWriterLevel(trans, level) + if err != nil { + log.Println(err) + return nil, err + } + + return &TZlibTransport{ + writer: w, + transport: trans, + }, nil +} + +// Close closes the reader and writer (flushing any unwritten data) and closes +// the underlying transport. +func (z *TZlibTransport) Close() error { + if z.reader != nil { + if err := z.reader.Close(); err != nil { + return err + } + } + if err := z.writer.Close(); err != nil { + return err + } + return z.transport.Close() +} + +// Flush flushes the writer and its underlying transport. +func (z *TZlibTransport) Flush() error { + if err := z.writer.Flush(); err != nil { + return err + } + return z.transport.Flush() +} + +// IsOpen returns true if the transport is open +func (z *TZlibTransport) IsOpen() bool { + return z.transport.IsOpen() +} + +// Open opens the transport for communication +func (z *TZlibTransport) Open() error { + return z.transport.Open() +} + +func (z *TZlibTransport) Read(p []byte) (int, error) { + if z.reader == nil { + r, err := zlib.NewReader(z.transport) + if err != nil { + return 0, NewTTransportExceptionFromError(err) + } + z.reader = r + } + + return z.reader.Read(p) +} + +// RemainingBytes returns the size in bytes of the data that is still to be +// read. +func (z *TZlibTransport) RemainingBytes() uint64 { + return z.transport.RemainingBytes() +} + +func (z *TZlibTransport) Write(p []byte) (int, error) { + return z.writer.Write(p) +} diff --git a/vendor/github.com/bwmarrin/discordgo/discord.go b/vendor/github.com/bwmarrin/discordgo/discord.go new file mode 100644 index 0000000..2f0b6fd --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/discord.go @@ -0,0 +1,145 @@ +// Discordgo - Discord bindings for Go +// Available at https://github.com/bwmarrin/discordgo + +// Copyright 2015-2016 Bruce Marriner . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains high level helper functions and easy entry points for the +// entire discordgo package. These functions are beling developed and are very +// experimental at this point. They will most likley change so please use the +// low level functions if that's a problem. + +// Package discordgo provides Discord binding for Go +package discordgo + +import ( + "errors" + "fmt" + "net/http" + "time" +) + +// VERSION of DiscordGo, follows Semantic Versioning. (http://semver.org/) +const VERSION = "0.16.0" + +// ErrMFA will be risen by New when the user has 2FA. +var ErrMFA = errors.New("account has 2FA enabled") + +// New creates a new Discord session and will automate some startup +// tasks if given enough information to do so. Currently you can pass zero +// arguments and it will return an empty Discord session. 
+// There are 3 ways to call New: +// With a single auth token - All requests will use the token blindly, +// no verification of the token will be done and requests may fail. +// IF THE TOKEN IS FOR A BOT, IT MUST BE PREFIXED WITH `BOT ` +// eg: `"Bot "` +// With an email and password - Discord will sign in with the provided +// credentials. +// With an email, password and auth token - Discord will verify the auth +// token, if it is invalid it will sign in with the provided +// credentials. This is the Discord recommended way to sign in. +// +// NOTE: While email/pass authentication is supported by DiscordGo it is +// HIGHLY DISCOURAGED by Discord. Please only use email/pass to obtain a token +// and then use that authentication token for all future connections. +// Also, doing any form of automation with a user (non Bot) account may result +// in that account being permanently banned from Discord. +func New(args ...interface{}) (s *Session, err error) { + + // Create an empty Session interface. + s = &Session{ + State: NewState(), + ratelimiter: NewRatelimiter(), + StateEnabled: true, + Compress: true, + ShouldReconnectOnError: true, + ShardID: 0, + ShardCount: 1, + MaxRestRetries: 3, + Client: &http.Client{Timeout: (20 * time.Second)}, + sequence: new(int64), + } + + // If no arguments are passed return the empty Session interface. + if args == nil { + return + } + + // Variables used below when parsing func arguments + var auth, pass string + + // Parse passed arguments + for _, arg := range args { + + switch v := arg.(type) { + + case []string: + if len(v) > 3 { + err = fmt.Errorf("too many string parameters provided") + return + } + + // First string is either token or username + if len(v) > 0 { + auth = v[0] + } + + // If second string exists, it must be a password. + if len(v) > 1 { + pass = v[1] + } + + // If third string exists, it must be an auth token. + if len(v) > 2 { + s.Token = v[2] + } + + case string: + // First string must be either auth token or username. + // Second string must be a password. + // Only 2 input strings are supported. + + if auth == "" { + auth = v + } else if pass == "" { + pass = v + } else if s.Token == "" { + s.Token = v + } else { + err = fmt.Errorf("too many string parameters provided") + return + } + + // case Config: + // TODO: Parse configuration struct + + default: + err = fmt.Errorf("unsupported parameter type provided") + return + } + } + + // If only one string was provided, assume it is an auth token. + // Otherwise get auth token from Discord, if a token was specified + // Discord will verify it for free, or log the user in if it is + // invalid. + if pass == "" { + s.Token = auth + } else { + err = s.Login(auth, pass) + if err != nil || s.Token == "" { + if s.MFA { + err = ErrMFA + } else { + err = fmt.Errorf("Unable to fetch discord authentication token. %v", err) + } + return + } + } + + // The Session is now able to have RestAPI methods called on it. + // It is recommended that you now call Open() so that events will trigger. + + return +} diff --git a/vendor/github.com/bwmarrin/discordgo/endpoints.go b/vendor/github.com/bwmarrin/discordgo/endpoints.go new file mode 100644 index 0000000..96bcf28 --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/endpoints.go @@ -0,0 +1,127 @@ +// Discordgo - Discord bindings for Go +// Available at https://github.com/bwmarrin/discordgo + +// Copyright 2015-2016 Bruce Marriner . All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains variables for all known Discord end points. All functions +// throughout the Discordgo package use these variables for all connections +// to Discord. These are all exported and you may modify them if needed. + +package discordgo + +// Known Discord API Endpoints. +var ( + EndpointStatus = "https://status.discordapp.com/api/v2/" + EndpointSm = EndpointStatus + "scheduled-maintenances/" + EndpointSmActive = EndpointSm + "active.json" + EndpointSmUpcoming = EndpointSm + "upcoming.json" + + EndpointDiscord = "https://discordapp.com/" + EndpointAPI = EndpointDiscord + "api/" + EndpointGuilds = EndpointAPI + "guilds/" + EndpointChannels = EndpointAPI + "channels/" + EndpointUsers = EndpointAPI + "users/" + EndpointGateway = EndpointAPI + "gateway" + EndpointWebhooks = EndpointAPI + "webhooks/" + + EndpointCDN = "https://cdn.discordapp.com/" + EndpointCDNAttachments = EndpointCDN + "attachments/" + EndpointCDNAvatars = EndpointCDN + "avatars/" + EndpointCDNIcons = EndpointCDN + "icons/" + EndpointCDNSplashes = EndpointCDN + "splashes/" + EndpointCDNChannelIcons = EndpointCDN + "channel-icons/" + + EndpointAuth = EndpointAPI + "auth/" + EndpointLogin = EndpointAuth + "login" + EndpointLogout = EndpointAuth + "logout" + EndpointVerify = EndpointAuth + "verify" + EndpointVerifyResend = EndpointAuth + "verify/resend" + EndpointForgotPassword = EndpointAuth + "forgot" + EndpointResetPassword = EndpointAuth + "reset" + EndpointRegister = EndpointAuth + "register" + + EndpointVoice = EndpointAPI + "/voice/" + EndpointVoiceRegions = EndpointVoice + "regions" + EndpointVoiceIce = EndpointVoice + "ice" + + EndpointTutorial = EndpointAPI + "tutorial/" + EndpointTutorialIndicators = EndpointTutorial + "indicators" + + EndpointTrack = EndpointAPI + "track" + EndpointSso = EndpointAPI + "sso" + EndpointReport = EndpointAPI + "report" + EndpointIntegrations = EndpointAPI + "integrations" + + EndpointUser = func(uID string) string { return EndpointUsers + uID } + EndpointUserAvatar = func(uID, aID string) string { return EndpointCDNAvatars + uID + "/" + aID + ".png" } + EndpointUserSettings = func(uID string) string { return EndpointUsers + uID + "/settings" } + EndpointUserGuilds = func(uID string) string { return EndpointUsers + uID + "/guilds" } + EndpointUserGuild = func(uID, gID string) string { return EndpointUsers + uID + "/guilds/" + gID } + EndpointUserGuildSettings = func(uID, gID string) string { return EndpointUsers + uID + "/guilds/" + gID + "/settings" } + EndpointUserChannels = func(uID string) string { return EndpointUsers + uID + "/channels" } + EndpointUserDevices = func(uID string) string { return EndpointUsers + uID + "/devices" } + EndpointUserConnections = func(uID string) string { return EndpointUsers + uID + "/connections" } + EndpointUserNotes = func(uID string) string { return EndpointUsers + "@me/notes/" + uID } + + EndpointGuild = func(gID string) string { return EndpointGuilds + gID } + EndpointGuildInivtes = func(gID string) string { return EndpointGuilds + gID + "/invites" } + EndpointGuildChannels = func(gID string) string { return EndpointGuilds + gID + "/channels" } + EndpointGuildMembers = func(gID string) string { return EndpointGuilds + gID + "/members" } + EndpointGuildMember = func(gID, uID string) string { return EndpointGuilds + gID + "/members/" + uID } + EndpointGuildMemberRole = func(gID, uID, rID string) string { return EndpointGuilds + 
gID + "/members/" + uID + "/roles/" + rID } + EndpointGuildBans = func(gID string) string { return EndpointGuilds + gID + "/bans" } + EndpointGuildBan = func(gID, uID string) string { return EndpointGuilds + gID + "/bans/" + uID } + EndpointGuildIntegrations = func(gID string) string { return EndpointGuilds + gID + "/integrations" } + EndpointGuildIntegration = func(gID, iID string) string { return EndpointGuilds + gID + "/integrations/" + iID } + EndpointGuildIntegrationSync = func(gID, iID string) string { return EndpointGuilds + gID + "/integrations/" + iID + "/sync" } + EndpointGuildRoles = func(gID string) string { return EndpointGuilds + gID + "/roles" } + EndpointGuildRole = func(gID, rID string) string { return EndpointGuilds + gID + "/roles/" + rID } + EndpointGuildInvites = func(gID string) string { return EndpointGuilds + gID + "/invites" } + EndpointGuildEmbed = func(gID string) string { return EndpointGuilds + gID + "/embed" } + EndpointGuildPrune = func(gID string) string { return EndpointGuilds + gID + "/prune" } + EndpointGuildIcon = func(gID, hash string) string { return EndpointCDNIcons + gID + "/" + hash + ".png" } + EndpointGuildSplash = func(gID, hash string) string { return EndpointCDNSplashes + gID + "/" + hash + ".png" } + EndpointGuildWebhooks = func(gID string) string { return EndpointGuilds + gID + "/webhooks" } + + EndpointChannel = func(cID string) string { return EndpointChannels + cID } + EndpointChannelPermissions = func(cID string) string { return EndpointChannels + cID + "/permissions" } + EndpointChannelPermission = func(cID, tID string) string { return EndpointChannels + cID + "/permissions/" + tID } + EndpointChannelInvites = func(cID string) string { return EndpointChannels + cID + "/invites" } + EndpointChannelTyping = func(cID string) string { return EndpointChannels + cID + "/typing" } + EndpointChannelMessages = func(cID string) string { return EndpointChannels + cID + "/messages" } + EndpointChannelMessage = func(cID, mID string) string { return EndpointChannels + cID + "/messages/" + mID } + EndpointChannelMessageAck = func(cID, mID string) string { return EndpointChannels + cID + "/messages/" + mID + "/ack" } + EndpointChannelMessagesBulkDelete = func(cID string) string { return EndpointChannel(cID) + "/messages/bulk_delete" } + EndpointChannelMessagesPins = func(cID string) string { return EndpointChannel(cID) + "/pins" } + EndpointChannelMessagePin = func(cID, mID string) string { return EndpointChannel(cID) + "/pins/" + mID } + + EndpointGroupIcon = func(cID, hash string) string { return EndpointCDNChannelIcons + cID + "/" + hash + ".png" } + + EndpointChannelWebhooks = func(cID string) string { return EndpointChannel(cID) + "/webhooks" } + EndpointWebhook = func(wID string) string { return EndpointWebhooks + wID } + EndpointWebhookToken = func(wID, token string) string { return EndpointWebhooks + wID + "/" + token } + + EndpointMessageReactions = func(cID, mID, eID string) string { + return EndpointChannelMessage(cID, mID) + "/reactions/" + eID + } + EndpointMessageReaction = func(cID, mID, eID, uID string) string { + return EndpointMessageReactions(cID, mID, eID) + "/" + uID + } + + EndpointRelationships = func() string { return EndpointUsers + "@me" + "/relationships" } + EndpointRelationship = func(uID string) string { return EndpointRelationships() + "/" + uID } + EndpointRelationshipsMutual = func(uID string) string { return EndpointUsers + uID + "/relationships" } + + EndpointInvite = func(iID string) string { return EndpointAPI + 
"invite/" + iID } + + EndpointIntegrationsJoin = func(iID string) string { return EndpointAPI + "integrations/" + iID + "/join" } + + EndpointEmoji = func(eID string) string { return EndpointAPI + "emojis/" + eID + ".png" } + + EndpointOauth2 = EndpointAPI + "oauth2/" + EndpointApplications = EndpointOauth2 + "applications" + EndpointApplication = func(aID string) string { return EndpointApplications + "/" + aID } + EndpointApplicationsBot = func(aID string) string { return EndpointApplications + "/" + aID + "/bot" } +) diff --git a/vendor/github.com/bwmarrin/discordgo/event.go b/vendor/github.com/bwmarrin/discordgo/event.go new file mode 100644 index 0000000..906c2aa --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/event.go @@ -0,0 +1,230 @@ +package discordgo + +// EventHandler is an interface for Discord events. +type EventHandler interface { + // Type returns the type of event this handler belongs to. + Type() string + + // Handle is called whenever an event of Type() happens. + // It is the recievers responsibility to type assert that the interface + // is the expected struct. + Handle(*Session, interface{}) +} + +// EventInterfaceProvider is an interface for providing empty interfaces for +// Discord events. +type EventInterfaceProvider interface { + // Type is the type of event this handler belongs to. + Type() string + + // New returns a new instance of the struct this event handler handles. + // This is called once per event. + // The struct is provided to all handlers of the same Type(). + New() interface{} +} + +// interfaceEventType is the event handler type for interface{} events. +const interfaceEventType = "__INTERFACE__" + +// interfaceEventHandler is an event handler for interface{} events. +type interfaceEventHandler func(*Session, interface{}) + +// Type returns the event type for interface{} events. +func (eh interfaceEventHandler) Type() string { + return interfaceEventType +} + +// Handle is the handler for an interface{} event. +func (eh interfaceEventHandler) Handle(s *Session, i interface{}) { + eh(s, i) +} + +var registeredInterfaceProviders = map[string]EventInterfaceProvider{} + +// registerInterfaceProvider registers a provider so that DiscordGo can +// access it's New() method. +func registerInterfaceProvider(eh EventInterfaceProvider) { + if _, ok := registeredInterfaceProviders[eh.Type()]; ok { + return + // XXX: + // if we should error here, we need to do something with it. + // fmt.Errorf("event %s already registered", eh.Type()) + } + registeredInterfaceProviders[eh.Type()] = eh + return +} + +// eventHandlerInstance is a wrapper around an event handler, as functions +// cannot be compared directly. +type eventHandlerInstance struct { + eventHandler EventHandler +} + +// addEventHandler adds an event handler that will be fired anytime +// the Discord WSAPI matching eventHandler.Type() fires. +func (s *Session) addEventHandler(eventHandler EventHandler) func() { + s.handlersMu.Lock() + defer s.handlersMu.Unlock() + + if s.handlers == nil { + s.handlers = map[string][]*eventHandlerInstance{} + } + + ehi := &eventHandlerInstance{eventHandler} + s.handlers[eventHandler.Type()] = append(s.handlers[eventHandler.Type()], ehi) + + return func() { + s.removeEventHandlerInstance(eventHandler.Type(), ehi) + } +} + +// addEventHandler adds an event handler that will be fired the next time +// the Discord WSAPI matching eventHandler.Type() fires. 
+func (s *Session) addEventHandlerOnce(eventHandler EventHandler) func() {
+	s.handlersMu.Lock()
+	defer s.handlersMu.Unlock()
+
+	if s.onceHandlers == nil {
+		s.onceHandlers = map[string][]*eventHandlerInstance{}
+	}
+
+	ehi := &eventHandlerInstance{eventHandler}
+	s.onceHandlers[eventHandler.Type()] = append(s.onceHandlers[eventHandler.Type()], ehi)
+
+	return func() {
+		s.removeEventHandlerInstance(eventHandler.Type(), ehi)
+	}
+}
+
+// AddHandler allows you to add an event handler that will be fired anytime
+// the Discord WSAPI event that matches the function fires.
+// events.go contains all the Discord WSAPI events that can be fired.
+// eg:
+// Session.AddHandler(func(s *discordgo.Session, m *discordgo.MessageCreate) {
+// })
+//
+// or:
+// Session.AddHandler(func(s *discordgo.Session, m *discordgo.PresenceUpdate) {
+// })
+// The return value of this method is a function, that when called will remove the
+// event handler.
+func (s *Session) AddHandler(handler interface{}) func() {
+	eh := handlerForInterface(handler)
+
+	if eh == nil {
+		s.log(LogError, "Invalid handler type, handler will never be called")
+		return func() {}
+	}
+
+	return s.addEventHandler(eh)
+}
+
+// AddHandlerOnce allows you to add an event handler that will be fired the next time
+// the Discord WSAPI event that matches the function fires.
+// See AddHandler for more details.
+func (s *Session) AddHandlerOnce(handler interface{}) func() {
+	eh := handlerForInterface(handler)
+
+	if eh == nil {
+		s.log(LogError, "Invalid handler type, handler will never be called")
+		return func() {}
+	}
+
+	return s.addEventHandlerOnce(eh)
+}
+
+// removeEventHandlerInstance removes an event handler instance.
+func (s *Session) removeEventHandlerInstance(t string, ehi *eventHandlerInstance) {
+	s.handlersMu.Lock()
+	defer s.handlersMu.Unlock()
+
+	handlers := s.handlers[t]
+	for i := range handlers {
+		if handlers[i] == ehi {
+			s.handlers[t] = append(handlers[:i], handlers[i+1:]...)
+		}
+	}
+
+	onceHandlers := s.onceHandlers[t]
+	for i := range onceHandlers {
+		if onceHandlers[i] == ehi {
+			s.onceHandlers[t] = append(onceHandlers[:i], onceHandlers[i+1:]...)
+		}
+	}
+}
+
+// handle calls permanent and once handlers for an event type.
+func (s *Session) handle(t string, i interface{}) {
+	for _, eh := range s.handlers[t] {
+		go eh.eventHandler.Handle(s, i)
+	}
+
+	if len(s.onceHandlers[t]) > 0 {
+		for _, eh := range s.onceHandlers[t] {
+			go eh.eventHandler.Handle(s, i)
+		}
+		s.onceHandlers[t] = nil
+	}
+}
+
+// handleEvent handles an event type by calling internal methods, firing handlers and firing the
+// interface{} event.
+func (s *Session) handleEvent(t string, i interface{}) {
+	s.handlersMu.RLock()
+	defer s.handlersMu.RUnlock()
+
+	// All events are dispatched internally first.
+	s.onInterface(i)
+
+	// Then they are dispatched to anyone handling interface{} events.
+	s.handle(interfaceEventType, i)
+
+	// Finally they are dispatched to any typed handlers.
+	s.handle(t, i)
+}
+
+// setGuildIds will set the GuildID on all the members of a guild.
+// This is done as event data does not have it set.
+func setGuildIds(g *Guild) {
+	for _, c := range g.Channels {
+		c.GuildID = g.ID
+	}
+
+	for _, m := range g.Members {
+		m.GuildID = g.ID
+	}
+
+	for _, vs := range g.VoiceStates {
+		vs.GuildID = g.ID
+	}
+}
+
+// onInterface handles all internal events and routes them to the appropriate internal handler.
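+// Ready, GuildCreate and GuildUpdate payloads get their guild IDs filled
+// in first; voice updates are dispatched on new goroutines so that they
+// cannot block the caller.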
+func (s *Session) onInterface(i interface{}) { + switch t := i.(type) { + case *Ready: + for _, g := range t.Guilds { + setGuildIds(g) + } + s.onReady(t) + case *GuildCreate: + setGuildIds(t.Guild) + case *GuildUpdate: + setGuildIds(t.Guild) + case *VoiceServerUpdate: + go s.onVoiceServerUpdate(t) + case *VoiceStateUpdate: + go s.onVoiceStateUpdate(t) + } + err := s.State.onInterface(s, i) + if err != nil { + s.log(LogDebug, "error dispatching internal event, %s", err) + } +} + +// onReady handles the ready event. +func (s *Session) onReady(r *Ready) { + + // Store the SessionID within the Session struct. + s.sessionID = r.SessionID +} diff --git a/vendor/github.com/bwmarrin/discordgo/eventhandlers.go b/vendor/github.com/bwmarrin/discordgo/eventhandlers.go new file mode 100644 index 0000000..5cc157d --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/eventhandlers.go @@ -0,0 +1,1030 @@ +// Code generated by \"eventhandlers\"; DO NOT EDIT +// See events.go + +package discordgo + +// Following are all the event types. +// Event type values are used to match the events returned by Discord. +// EventTypes surrounded by __ are synthetic and are internal to DiscordGo. +const ( + channelCreateEventType = "CHANNEL_CREATE" + channelDeleteEventType = "CHANNEL_DELETE" + channelPinsUpdateEventType = "CHANNEL_PINS_UPDATE" + channelUpdateEventType = "CHANNEL_UPDATE" + connectEventType = "__CONNECT__" + disconnectEventType = "__DISCONNECT__" + eventEventType = "__EVENT__" + guildBanAddEventType = "GUILD_BAN_ADD" + guildBanRemoveEventType = "GUILD_BAN_REMOVE" + guildCreateEventType = "GUILD_CREATE" + guildDeleteEventType = "GUILD_DELETE" + guildEmojisUpdateEventType = "GUILD_EMOJIS_UPDATE" + guildIntegrationsUpdateEventType = "GUILD_INTEGRATIONS_UPDATE" + guildMemberAddEventType = "GUILD_MEMBER_ADD" + guildMemberRemoveEventType = "GUILD_MEMBER_REMOVE" + guildMemberUpdateEventType = "GUILD_MEMBER_UPDATE" + guildMembersChunkEventType = "GUILD_MEMBERS_CHUNK" + guildRoleCreateEventType = "GUILD_ROLE_CREATE" + guildRoleDeleteEventType = "GUILD_ROLE_DELETE" + guildRoleUpdateEventType = "GUILD_ROLE_UPDATE" + guildUpdateEventType = "GUILD_UPDATE" + messageAckEventType = "MESSAGE_ACK" + messageCreateEventType = "MESSAGE_CREATE" + messageDeleteEventType = "MESSAGE_DELETE" + messageDeleteBulkEventType = "MESSAGE_DELETE_BULK" + messageReactionAddEventType = "MESSAGE_REACTION_ADD" + messageReactionRemoveEventType = "MESSAGE_REACTION_REMOVE" + messageReactionRemoveAllEventType = "MESSAGE_REACTION_REMOVE_ALL" + messageUpdateEventType = "MESSAGE_UPDATE" + presenceUpdateEventType = "PRESENCE_UPDATE" + presencesReplaceEventType = "PRESENCES_REPLACE" + rateLimitEventType = "__RATE_LIMIT__" + readyEventType = "READY" + relationshipAddEventType = "RELATIONSHIP_ADD" + relationshipRemoveEventType = "RELATIONSHIP_REMOVE" + resumedEventType = "RESUMED" + typingStartEventType = "TYPING_START" + userGuildSettingsUpdateEventType = "USER_GUILD_SETTINGS_UPDATE" + userNoteUpdateEventType = "USER_NOTE_UPDATE" + userSettingsUpdateEventType = "USER_SETTINGS_UPDATE" + userUpdateEventType = "USER_UPDATE" + voiceServerUpdateEventType = "VOICE_SERVER_UPDATE" + voiceStateUpdateEventType = "VOICE_STATE_UPDATE" +) + +// channelCreateEventHandler is an event handler for ChannelCreate events. +type channelCreateEventHandler func(*Session, *ChannelCreate) + +// Type returns the event type for ChannelCreate events. 
+func (eh channelCreateEventHandler) Type() string { + return channelCreateEventType +} + +// New returns a new instance of ChannelCreate. +func (eh channelCreateEventHandler) New() interface{} { + return &ChannelCreate{} +} + +// Handle is the handler for ChannelCreate events. +func (eh channelCreateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*ChannelCreate); ok { + eh(s, t) + } +} + +// channelDeleteEventHandler is an event handler for ChannelDelete events. +type channelDeleteEventHandler func(*Session, *ChannelDelete) + +// Type returns the event type for ChannelDelete events. +func (eh channelDeleteEventHandler) Type() string { + return channelDeleteEventType +} + +// New returns a new instance of ChannelDelete. +func (eh channelDeleteEventHandler) New() interface{} { + return &ChannelDelete{} +} + +// Handle is the handler for ChannelDelete events. +func (eh channelDeleteEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*ChannelDelete); ok { + eh(s, t) + } +} + +// channelPinsUpdateEventHandler is an event handler for ChannelPinsUpdate events. +type channelPinsUpdateEventHandler func(*Session, *ChannelPinsUpdate) + +// Type returns the event type for ChannelPinsUpdate events. +func (eh channelPinsUpdateEventHandler) Type() string { + return channelPinsUpdateEventType +} + +// New returns a new instance of ChannelPinsUpdate. +func (eh channelPinsUpdateEventHandler) New() interface{} { + return &ChannelPinsUpdate{} +} + +// Handle is the handler for ChannelPinsUpdate events. +func (eh channelPinsUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*ChannelPinsUpdate); ok { + eh(s, t) + } +} + +// channelUpdateEventHandler is an event handler for ChannelUpdate events. +type channelUpdateEventHandler func(*Session, *ChannelUpdate) + +// Type returns the event type for ChannelUpdate events. +func (eh channelUpdateEventHandler) Type() string { + return channelUpdateEventType +} + +// New returns a new instance of ChannelUpdate. +func (eh channelUpdateEventHandler) New() interface{} { + return &ChannelUpdate{} +} + +// Handle is the handler for ChannelUpdate events. +func (eh channelUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*ChannelUpdate); ok { + eh(s, t) + } +} + +// connectEventHandler is an event handler for Connect events. +type connectEventHandler func(*Session, *Connect) + +// Type returns the event type for Connect events. +func (eh connectEventHandler) Type() string { + return connectEventType +} + +// Handle is the handler for Connect events. +func (eh connectEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*Connect); ok { + eh(s, t) + } +} + +// disconnectEventHandler is an event handler for Disconnect events. +type disconnectEventHandler func(*Session, *Disconnect) + +// Type returns the event type for Disconnect events. +func (eh disconnectEventHandler) Type() string { + return disconnectEventType +} + +// Handle is the handler for Disconnect events. +func (eh disconnectEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*Disconnect); ok { + eh(s, t) + } +} + +// eventEventHandler is an event handler for Event events. +type eventEventHandler func(*Session, *Event) + +// Type returns the event type for Event events. +func (eh eventEventHandler) Type() string { + return eventEventType +} + +// Handle is the handler for Event events. 
+func (eh eventEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*Event); ok { + eh(s, t) + } +} + +// guildBanAddEventHandler is an event handler for GuildBanAdd events. +type guildBanAddEventHandler func(*Session, *GuildBanAdd) + +// Type returns the event type for GuildBanAdd events. +func (eh guildBanAddEventHandler) Type() string { + return guildBanAddEventType +} + +// New returns a new instance of GuildBanAdd. +func (eh guildBanAddEventHandler) New() interface{} { + return &GuildBanAdd{} +} + +// Handle is the handler for GuildBanAdd events. +func (eh guildBanAddEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildBanAdd); ok { + eh(s, t) + } +} + +// guildBanRemoveEventHandler is an event handler for GuildBanRemove events. +type guildBanRemoveEventHandler func(*Session, *GuildBanRemove) + +// Type returns the event type for GuildBanRemove events. +func (eh guildBanRemoveEventHandler) Type() string { + return guildBanRemoveEventType +} + +// New returns a new instance of GuildBanRemove. +func (eh guildBanRemoveEventHandler) New() interface{} { + return &GuildBanRemove{} +} + +// Handle is the handler for GuildBanRemove events. +func (eh guildBanRemoveEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildBanRemove); ok { + eh(s, t) + } +} + +// guildCreateEventHandler is an event handler for GuildCreate events. +type guildCreateEventHandler func(*Session, *GuildCreate) + +// Type returns the event type for GuildCreate events. +func (eh guildCreateEventHandler) Type() string { + return guildCreateEventType +} + +// New returns a new instance of GuildCreate. +func (eh guildCreateEventHandler) New() interface{} { + return &GuildCreate{} +} + +// Handle is the handler for GuildCreate events. +func (eh guildCreateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildCreate); ok { + eh(s, t) + } +} + +// guildDeleteEventHandler is an event handler for GuildDelete events. +type guildDeleteEventHandler func(*Session, *GuildDelete) + +// Type returns the event type for GuildDelete events. +func (eh guildDeleteEventHandler) Type() string { + return guildDeleteEventType +} + +// New returns a new instance of GuildDelete. +func (eh guildDeleteEventHandler) New() interface{} { + return &GuildDelete{} +} + +// Handle is the handler for GuildDelete events. +func (eh guildDeleteEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildDelete); ok { + eh(s, t) + } +} + +// guildEmojisUpdateEventHandler is an event handler for GuildEmojisUpdate events. +type guildEmojisUpdateEventHandler func(*Session, *GuildEmojisUpdate) + +// Type returns the event type for GuildEmojisUpdate events. +func (eh guildEmojisUpdateEventHandler) Type() string { + return guildEmojisUpdateEventType +} + +// New returns a new instance of GuildEmojisUpdate. +func (eh guildEmojisUpdateEventHandler) New() interface{} { + return &GuildEmojisUpdate{} +} + +// Handle is the handler for GuildEmojisUpdate events. +func (eh guildEmojisUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildEmojisUpdate); ok { + eh(s, t) + } +} + +// guildIntegrationsUpdateEventHandler is an event handler for GuildIntegrationsUpdate events. +type guildIntegrationsUpdateEventHandler func(*Session, *GuildIntegrationsUpdate) + +// Type returns the event type for GuildIntegrationsUpdate events. 
+func (eh guildIntegrationsUpdateEventHandler) Type() string { + return guildIntegrationsUpdateEventType +} + +// New returns a new instance of GuildIntegrationsUpdate. +func (eh guildIntegrationsUpdateEventHandler) New() interface{} { + return &GuildIntegrationsUpdate{} +} + +// Handle is the handler for GuildIntegrationsUpdate events. +func (eh guildIntegrationsUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildIntegrationsUpdate); ok { + eh(s, t) + } +} + +// guildMemberAddEventHandler is an event handler for GuildMemberAdd events. +type guildMemberAddEventHandler func(*Session, *GuildMemberAdd) + +// Type returns the event type for GuildMemberAdd events. +func (eh guildMemberAddEventHandler) Type() string { + return guildMemberAddEventType +} + +// New returns a new instance of GuildMemberAdd. +func (eh guildMemberAddEventHandler) New() interface{} { + return &GuildMemberAdd{} +} + +// Handle is the handler for GuildMemberAdd events. +func (eh guildMemberAddEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildMemberAdd); ok { + eh(s, t) + } +} + +// guildMemberRemoveEventHandler is an event handler for GuildMemberRemove events. +type guildMemberRemoveEventHandler func(*Session, *GuildMemberRemove) + +// Type returns the event type for GuildMemberRemove events. +func (eh guildMemberRemoveEventHandler) Type() string { + return guildMemberRemoveEventType +} + +// New returns a new instance of GuildMemberRemove. +func (eh guildMemberRemoveEventHandler) New() interface{} { + return &GuildMemberRemove{} +} + +// Handle is the handler for GuildMemberRemove events. +func (eh guildMemberRemoveEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildMemberRemove); ok { + eh(s, t) + } +} + +// guildMemberUpdateEventHandler is an event handler for GuildMemberUpdate events. +type guildMemberUpdateEventHandler func(*Session, *GuildMemberUpdate) + +// Type returns the event type for GuildMemberUpdate events. +func (eh guildMemberUpdateEventHandler) Type() string { + return guildMemberUpdateEventType +} + +// New returns a new instance of GuildMemberUpdate. +func (eh guildMemberUpdateEventHandler) New() interface{} { + return &GuildMemberUpdate{} +} + +// Handle is the handler for GuildMemberUpdate events. +func (eh guildMemberUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildMemberUpdate); ok { + eh(s, t) + } +} + +// guildMembersChunkEventHandler is an event handler for GuildMembersChunk events. +type guildMembersChunkEventHandler func(*Session, *GuildMembersChunk) + +// Type returns the event type for GuildMembersChunk events. +func (eh guildMembersChunkEventHandler) Type() string { + return guildMembersChunkEventType +} + +// New returns a new instance of GuildMembersChunk. +func (eh guildMembersChunkEventHandler) New() interface{} { + return &GuildMembersChunk{} +} + +// Handle is the handler for GuildMembersChunk events. +func (eh guildMembersChunkEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildMembersChunk); ok { + eh(s, t) + } +} + +// guildRoleCreateEventHandler is an event handler for GuildRoleCreate events. +type guildRoleCreateEventHandler func(*Session, *GuildRoleCreate) + +// Type returns the event type for GuildRoleCreate events. +func (eh guildRoleCreateEventHandler) Type() string { + return guildRoleCreateEventType +} + +// New returns a new instance of GuildRoleCreate. 
+func (eh guildRoleCreateEventHandler) New() interface{} { + return &GuildRoleCreate{} +} + +// Handle is the handler for GuildRoleCreate events. +func (eh guildRoleCreateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildRoleCreate); ok { + eh(s, t) + } +} + +// guildRoleDeleteEventHandler is an event handler for GuildRoleDelete events. +type guildRoleDeleteEventHandler func(*Session, *GuildRoleDelete) + +// Type returns the event type for GuildRoleDelete events. +func (eh guildRoleDeleteEventHandler) Type() string { + return guildRoleDeleteEventType +} + +// New returns a new instance of GuildRoleDelete. +func (eh guildRoleDeleteEventHandler) New() interface{} { + return &GuildRoleDelete{} +} + +// Handle is the handler for GuildRoleDelete events. +func (eh guildRoleDeleteEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildRoleDelete); ok { + eh(s, t) + } +} + +// guildRoleUpdateEventHandler is an event handler for GuildRoleUpdate events. +type guildRoleUpdateEventHandler func(*Session, *GuildRoleUpdate) + +// Type returns the event type for GuildRoleUpdate events. +func (eh guildRoleUpdateEventHandler) Type() string { + return guildRoleUpdateEventType +} + +// New returns a new instance of GuildRoleUpdate. +func (eh guildRoleUpdateEventHandler) New() interface{} { + return &GuildRoleUpdate{} +} + +// Handle is the handler for GuildRoleUpdate events. +func (eh guildRoleUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildRoleUpdate); ok { + eh(s, t) + } +} + +// guildUpdateEventHandler is an event handler for GuildUpdate events. +type guildUpdateEventHandler func(*Session, *GuildUpdate) + +// Type returns the event type for GuildUpdate events. +func (eh guildUpdateEventHandler) Type() string { + return guildUpdateEventType +} + +// New returns a new instance of GuildUpdate. +func (eh guildUpdateEventHandler) New() interface{} { + return &GuildUpdate{} +} + +// Handle is the handler for GuildUpdate events. +func (eh guildUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*GuildUpdate); ok { + eh(s, t) + } +} + +// messageAckEventHandler is an event handler for MessageAck events. +type messageAckEventHandler func(*Session, *MessageAck) + +// Type returns the event type for MessageAck events. +func (eh messageAckEventHandler) Type() string { + return messageAckEventType +} + +// New returns a new instance of MessageAck. +func (eh messageAckEventHandler) New() interface{} { + return &MessageAck{} +} + +// Handle is the handler for MessageAck events. +func (eh messageAckEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*MessageAck); ok { + eh(s, t) + } +} + +// messageCreateEventHandler is an event handler for MessageCreate events. +type messageCreateEventHandler func(*Session, *MessageCreate) + +// Type returns the event type for MessageCreate events. +func (eh messageCreateEventHandler) Type() string { + return messageCreateEventType +} + +// New returns a new instance of MessageCreate. +func (eh messageCreateEventHandler) New() interface{} { + return &MessageCreate{} +} + +// Handle is the handler for MessageCreate events. +func (eh messageCreateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*MessageCreate); ok { + eh(s, t) + } +} + +// messageDeleteEventHandler is an event handler for MessageDelete events. +type messageDeleteEventHandler func(*Session, *MessageDelete) + +// Type returns the event type for MessageDelete events. 
+func (eh messageDeleteEventHandler) Type() string { + return messageDeleteEventType +} + +// New returns a new instance of MessageDelete. +func (eh messageDeleteEventHandler) New() interface{} { + return &MessageDelete{} +} + +// Handle is the handler for MessageDelete events. +func (eh messageDeleteEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*MessageDelete); ok { + eh(s, t) + } +} + +// messageDeleteBulkEventHandler is an event handler for MessageDeleteBulk events. +type messageDeleteBulkEventHandler func(*Session, *MessageDeleteBulk) + +// Type returns the event type for MessageDeleteBulk events. +func (eh messageDeleteBulkEventHandler) Type() string { + return messageDeleteBulkEventType +} + +// New returns a new instance of MessageDeleteBulk. +func (eh messageDeleteBulkEventHandler) New() interface{} { + return &MessageDeleteBulk{} +} + +// Handle is the handler for MessageDeleteBulk events. +func (eh messageDeleteBulkEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*MessageDeleteBulk); ok { + eh(s, t) + } +} + +// messageReactionAddEventHandler is an event handler for MessageReactionAdd events. +type messageReactionAddEventHandler func(*Session, *MessageReactionAdd) + +// Type returns the event type for MessageReactionAdd events. +func (eh messageReactionAddEventHandler) Type() string { + return messageReactionAddEventType +} + +// New returns a new instance of MessageReactionAdd. +func (eh messageReactionAddEventHandler) New() interface{} { + return &MessageReactionAdd{} +} + +// Handle is the handler for MessageReactionAdd events. +func (eh messageReactionAddEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*MessageReactionAdd); ok { + eh(s, t) + } +} + +// messageReactionRemoveEventHandler is an event handler for MessageReactionRemove events. +type messageReactionRemoveEventHandler func(*Session, *MessageReactionRemove) + +// Type returns the event type for MessageReactionRemove events. +func (eh messageReactionRemoveEventHandler) Type() string { + return messageReactionRemoveEventType +} + +// New returns a new instance of MessageReactionRemove. +func (eh messageReactionRemoveEventHandler) New() interface{} { + return &MessageReactionRemove{} +} + +// Handle is the handler for MessageReactionRemove events. +func (eh messageReactionRemoveEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*MessageReactionRemove); ok { + eh(s, t) + } +} + +// messageReactionRemoveAllEventHandler is an event handler for MessageReactionRemoveAll events. +type messageReactionRemoveAllEventHandler func(*Session, *MessageReactionRemoveAll) + +// Type returns the event type for MessageReactionRemoveAll events. +func (eh messageReactionRemoveAllEventHandler) Type() string { + return messageReactionRemoveAllEventType +} + +// New returns a new instance of MessageReactionRemoveAll. +func (eh messageReactionRemoveAllEventHandler) New() interface{} { + return &MessageReactionRemoveAll{} +} + +// Handle is the handler for MessageReactionRemoveAll events. +func (eh messageReactionRemoveAllEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*MessageReactionRemoveAll); ok { + eh(s, t) + } +} + +// messageUpdateEventHandler is an event handler for MessageUpdate events. +type messageUpdateEventHandler func(*Session, *MessageUpdate) + +// Type returns the event type for MessageUpdate events. +func (eh messageUpdateEventHandler) Type() string { + return messageUpdateEventType +} + +// New returns a new instance of MessageUpdate. 
+func (eh messageUpdateEventHandler) New() interface{} { + return &MessageUpdate{} +} + +// Handle is the handler for MessageUpdate events. +func (eh messageUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*MessageUpdate); ok { + eh(s, t) + } +} + +// presenceUpdateEventHandler is an event handler for PresenceUpdate events. +type presenceUpdateEventHandler func(*Session, *PresenceUpdate) + +// Type returns the event type for PresenceUpdate events. +func (eh presenceUpdateEventHandler) Type() string { + return presenceUpdateEventType +} + +// New returns a new instance of PresenceUpdate. +func (eh presenceUpdateEventHandler) New() interface{} { + return &PresenceUpdate{} +} + +// Handle is the handler for PresenceUpdate events. +func (eh presenceUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*PresenceUpdate); ok { + eh(s, t) + } +} + +// presencesReplaceEventHandler is an event handler for PresencesReplace events. +type presencesReplaceEventHandler func(*Session, *PresencesReplace) + +// Type returns the event type for PresencesReplace events. +func (eh presencesReplaceEventHandler) Type() string { + return presencesReplaceEventType +} + +// New returns a new instance of PresencesReplace. +func (eh presencesReplaceEventHandler) New() interface{} { + return &PresencesReplace{} +} + +// Handle is the handler for PresencesReplace events. +func (eh presencesReplaceEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*PresencesReplace); ok { + eh(s, t) + } +} + +// rateLimitEventHandler is an event handler for RateLimit events. +type rateLimitEventHandler func(*Session, *RateLimit) + +// Type returns the event type for RateLimit events. +func (eh rateLimitEventHandler) Type() string { + return rateLimitEventType +} + +// Handle is the handler for RateLimit events. +func (eh rateLimitEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*RateLimit); ok { + eh(s, t) + } +} + +// readyEventHandler is an event handler for Ready events. +type readyEventHandler func(*Session, *Ready) + +// Type returns the event type for Ready events. +func (eh readyEventHandler) Type() string { + return readyEventType +} + +// New returns a new instance of Ready. +func (eh readyEventHandler) New() interface{} { + return &Ready{} +} + +// Handle is the handler for Ready events. +func (eh readyEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*Ready); ok { + eh(s, t) + } +} + +// relationshipAddEventHandler is an event handler for RelationshipAdd events. +type relationshipAddEventHandler func(*Session, *RelationshipAdd) + +// Type returns the event type for RelationshipAdd events. +func (eh relationshipAddEventHandler) Type() string { + return relationshipAddEventType +} + +// New returns a new instance of RelationshipAdd. +func (eh relationshipAddEventHandler) New() interface{} { + return &RelationshipAdd{} +} + +// Handle is the handler for RelationshipAdd events. +func (eh relationshipAddEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*RelationshipAdd); ok { + eh(s, t) + } +} + +// relationshipRemoveEventHandler is an event handler for RelationshipRemove events. +type relationshipRemoveEventHandler func(*Session, *RelationshipRemove) + +// Type returns the event type for RelationshipRemove events. +func (eh relationshipRemoveEventHandler) Type() string { + return relationshipRemoveEventType +} + +// New returns a new instance of RelationshipRemove. 
+func (eh relationshipRemoveEventHandler) New() interface{} { + return &RelationshipRemove{} +} + +// Handle is the handler for RelationshipRemove events. +func (eh relationshipRemoveEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*RelationshipRemove); ok { + eh(s, t) + } +} + +// resumedEventHandler is an event handler for Resumed events. +type resumedEventHandler func(*Session, *Resumed) + +// Type returns the event type for Resumed events. +func (eh resumedEventHandler) Type() string { + return resumedEventType +} + +// New returns a new instance of Resumed. +func (eh resumedEventHandler) New() interface{} { + return &Resumed{} +} + +// Handle is the handler for Resumed events. +func (eh resumedEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*Resumed); ok { + eh(s, t) + } +} + +// typingStartEventHandler is an event handler for TypingStart events. +type typingStartEventHandler func(*Session, *TypingStart) + +// Type returns the event type for TypingStart events. +func (eh typingStartEventHandler) Type() string { + return typingStartEventType +} + +// New returns a new instance of TypingStart. +func (eh typingStartEventHandler) New() interface{} { + return &TypingStart{} +} + +// Handle is the handler for TypingStart events. +func (eh typingStartEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*TypingStart); ok { + eh(s, t) + } +} + +// userGuildSettingsUpdateEventHandler is an event handler for UserGuildSettingsUpdate events. +type userGuildSettingsUpdateEventHandler func(*Session, *UserGuildSettingsUpdate) + +// Type returns the event type for UserGuildSettingsUpdate events. +func (eh userGuildSettingsUpdateEventHandler) Type() string { + return userGuildSettingsUpdateEventType +} + +// New returns a new instance of UserGuildSettingsUpdate. +func (eh userGuildSettingsUpdateEventHandler) New() interface{} { + return &UserGuildSettingsUpdate{} +} + +// Handle is the handler for UserGuildSettingsUpdate events. +func (eh userGuildSettingsUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*UserGuildSettingsUpdate); ok { + eh(s, t) + } +} + +// userNoteUpdateEventHandler is an event handler for UserNoteUpdate events. +type userNoteUpdateEventHandler func(*Session, *UserNoteUpdate) + +// Type returns the event type for UserNoteUpdate events. +func (eh userNoteUpdateEventHandler) Type() string { + return userNoteUpdateEventType +} + +// New returns a new instance of UserNoteUpdate. +func (eh userNoteUpdateEventHandler) New() interface{} { + return &UserNoteUpdate{} +} + +// Handle is the handler for UserNoteUpdate events. +func (eh userNoteUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*UserNoteUpdate); ok { + eh(s, t) + } +} + +// userSettingsUpdateEventHandler is an event handler for UserSettingsUpdate events. +type userSettingsUpdateEventHandler func(*Session, *UserSettingsUpdate) + +// Type returns the event type for UserSettingsUpdate events. +func (eh userSettingsUpdateEventHandler) Type() string { + return userSettingsUpdateEventType +} + +// New returns a new instance of UserSettingsUpdate. +func (eh userSettingsUpdateEventHandler) New() interface{} { + return &UserSettingsUpdate{} +} + +// Handle is the handler for UserSettingsUpdate events. +func (eh userSettingsUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*UserSettingsUpdate); ok { + eh(s, t) + } +} + +// userUpdateEventHandler is an event handler for UserUpdate events. 
+type userUpdateEventHandler func(*Session, *UserUpdate) + +// Type returns the event type for UserUpdate events. +func (eh userUpdateEventHandler) Type() string { + return userUpdateEventType +} + +// New returns a new instance of UserUpdate. +func (eh userUpdateEventHandler) New() interface{} { + return &UserUpdate{} +} + +// Handle is the handler for UserUpdate events. +func (eh userUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*UserUpdate); ok { + eh(s, t) + } +} + +// voiceServerUpdateEventHandler is an event handler for VoiceServerUpdate events. +type voiceServerUpdateEventHandler func(*Session, *VoiceServerUpdate) + +// Type returns the event type for VoiceServerUpdate events. +func (eh voiceServerUpdateEventHandler) Type() string { + return voiceServerUpdateEventType +} + +// New returns a new instance of VoiceServerUpdate. +func (eh voiceServerUpdateEventHandler) New() interface{} { + return &VoiceServerUpdate{} +} + +// Handle is the handler for VoiceServerUpdate events. +func (eh voiceServerUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*VoiceServerUpdate); ok { + eh(s, t) + } +} + +// voiceStateUpdateEventHandler is an event handler for VoiceStateUpdate events. +type voiceStateUpdateEventHandler func(*Session, *VoiceStateUpdate) + +// Type returns the event type for VoiceStateUpdate events. +func (eh voiceStateUpdateEventHandler) Type() string { + return voiceStateUpdateEventType +} + +// New returns a new instance of VoiceStateUpdate. +func (eh voiceStateUpdateEventHandler) New() interface{} { + return &VoiceStateUpdate{} +} + +// Handle is the handler for VoiceStateUpdate events. +func (eh voiceStateUpdateEventHandler) Handle(s *Session, i interface{}) { + if t, ok := i.(*VoiceStateUpdate); ok { + eh(s, t) + } +} + +func handlerForInterface(handler interface{}) EventHandler { + switch v := handler.(type) { + case func(*Session, interface{}): + return interfaceEventHandler(v) + case func(*Session, *ChannelCreate): + return channelCreateEventHandler(v) + case func(*Session, *ChannelDelete): + return channelDeleteEventHandler(v) + case func(*Session, *ChannelPinsUpdate): + return channelPinsUpdateEventHandler(v) + case func(*Session, *ChannelUpdate): + return channelUpdateEventHandler(v) + case func(*Session, *Connect): + return connectEventHandler(v) + case func(*Session, *Disconnect): + return disconnectEventHandler(v) + case func(*Session, *Event): + return eventEventHandler(v) + case func(*Session, *GuildBanAdd): + return guildBanAddEventHandler(v) + case func(*Session, *GuildBanRemove): + return guildBanRemoveEventHandler(v) + case func(*Session, *GuildCreate): + return guildCreateEventHandler(v) + case func(*Session, *GuildDelete): + return guildDeleteEventHandler(v) + case func(*Session, *GuildEmojisUpdate): + return guildEmojisUpdateEventHandler(v) + case func(*Session, *GuildIntegrationsUpdate): + return guildIntegrationsUpdateEventHandler(v) + case func(*Session, *GuildMemberAdd): + return guildMemberAddEventHandler(v) + case func(*Session, *GuildMemberRemove): + return guildMemberRemoveEventHandler(v) + case func(*Session, *GuildMemberUpdate): + return guildMemberUpdateEventHandler(v) + case func(*Session, *GuildMembersChunk): + return guildMembersChunkEventHandler(v) + case func(*Session, *GuildRoleCreate): + return guildRoleCreateEventHandler(v) + case func(*Session, *GuildRoleDelete): + return guildRoleDeleteEventHandler(v) + case func(*Session, *GuildRoleUpdate): + return guildRoleUpdateEventHandler(v) + case 
func(*Session, *GuildUpdate): + return guildUpdateEventHandler(v) + case func(*Session, *MessageAck): + return messageAckEventHandler(v) + case func(*Session, *MessageCreate): + return messageCreateEventHandler(v) + case func(*Session, *MessageDelete): + return messageDeleteEventHandler(v) + case func(*Session, *MessageDeleteBulk): + return messageDeleteBulkEventHandler(v) + case func(*Session, *MessageReactionAdd): + return messageReactionAddEventHandler(v) + case func(*Session, *MessageReactionRemove): + return messageReactionRemoveEventHandler(v) + case func(*Session, *MessageReactionRemoveAll): + return messageReactionRemoveAllEventHandler(v) + case func(*Session, *MessageUpdate): + return messageUpdateEventHandler(v) + case func(*Session, *PresenceUpdate): + return presenceUpdateEventHandler(v) + case func(*Session, *PresencesReplace): + return presencesReplaceEventHandler(v) + case func(*Session, *RateLimit): + return rateLimitEventHandler(v) + case func(*Session, *Ready): + return readyEventHandler(v) + case func(*Session, *RelationshipAdd): + return relationshipAddEventHandler(v) + case func(*Session, *RelationshipRemove): + return relationshipRemoveEventHandler(v) + case func(*Session, *Resumed): + return resumedEventHandler(v) + case func(*Session, *TypingStart): + return typingStartEventHandler(v) + case func(*Session, *UserGuildSettingsUpdate): + return userGuildSettingsUpdateEventHandler(v) + case func(*Session, *UserNoteUpdate): + return userNoteUpdateEventHandler(v) + case func(*Session, *UserSettingsUpdate): + return userSettingsUpdateEventHandler(v) + case func(*Session, *UserUpdate): + return userUpdateEventHandler(v) + case func(*Session, *VoiceServerUpdate): + return voiceServerUpdateEventHandler(v) + case func(*Session, *VoiceStateUpdate): + return voiceStateUpdateEventHandler(v) + } + + return nil +} + +func init() { + registerInterfaceProvider(channelCreateEventHandler(nil)) + registerInterfaceProvider(channelDeleteEventHandler(nil)) + registerInterfaceProvider(channelPinsUpdateEventHandler(nil)) + registerInterfaceProvider(channelUpdateEventHandler(nil)) + registerInterfaceProvider(guildBanAddEventHandler(nil)) + registerInterfaceProvider(guildBanRemoveEventHandler(nil)) + registerInterfaceProvider(guildCreateEventHandler(nil)) + registerInterfaceProvider(guildDeleteEventHandler(nil)) + registerInterfaceProvider(guildEmojisUpdateEventHandler(nil)) + registerInterfaceProvider(guildIntegrationsUpdateEventHandler(nil)) + registerInterfaceProvider(guildMemberAddEventHandler(nil)) + registerInterfaceProvider(guildMemberRemoveEventHandler(nil)) + registerInterfaceProvider(guildMemberUpdateEventHandler(nil)) + registerInterfaceProvider(guildMembersChunkEventHandler(nil)) + registerInterfaceProvider(guildRoleCreateEventHandler(nil)) + registerInterfaceProvider(guildRoleDeleteEventHandler(nil)) + registerInterfaceProvider(guildRoleUpdateEventHandler(nil)) + registerInterfaceProvider(guildUpdateEventHandler(nil)) + registerInterfaceProvider(messageAckEventHandler(nil)) + registerInterfaceProvider(messageCreateEventHandler(nil)) + registerInterfaceProvider(messageDeleteEventHandler(nil)) + registerInterfaceProvider(messageDeleteBulkEventHandler(nil)) + registerInterfaceProvider(messageReactionAddEventHandler(nil)) + registerInterfaceProvider(messageReactionRemoveEventHandler(nil)) + registerInterfaceProvider(messageReactionRemoveAllEventHandler(nil)) + registerInterfaceProvider(messageUpdateEventHandler(nil)) + registerInterfaceProvider(presenceUpdateEventHandler(nil)) + 
registerInterfaceProvider(presencesReplaceEventHandler(nil)) + registerInterfaceProvider(readyEventHandler(nil)) + registerInterfaceProvider(relationshipAddEventHandler(nil)) + registerInterfaceProvider(relationshipRemoveEventHandler(nil)) + registerInterfaceProvider(resumedEventHandler(nil)) + registerInterfaceProvider(typingStartEventHandler(nil)) + registerInterfaceProvider(userGuildSettingsUpdateEventHandler(nil)) + registerInterfaceProvider(userNoteUpdateEventHandler(nil)) + registerInterfaceProvider(userSettingsUpdateEventHandler(nil)) + registerInterfaceProvider(userUpdateEventHandler(nil)) + registerInterfaceProvider(voiceServerUpdateEventHandler(nil)) + registerInterfaceProvider(voiceStateUpdateEventHandler(nil)) +} diff --git a/vendor/github.com/bwmarrin/discordgo/events.go b/vendor/github.com/bwmarrin/discordgo/events.go new file mode 100644 index 0000000..c78fbdd --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/events.go @@ -0,0 +1,253 @@ +package discordgo + +import ( + "encoding/json" +) + +// This file contains all the possible structs that can be +// handled by AddHandler/EventHandler. +// DO NOT ADD ANYTHING BUT EVENT HANDLER STRUCTS TO THIS FILE. +//go:generate go run tools/cmd/eventhandlers/main.go + +// Connect is the data for a Connect event. +// This is a synthetic event and is not dispatched by Discord. +type Connect struct{} + +// Disconnect is the data for a Disconnect event. +// This is a synthetic event and is not dispatched by Discord. +type Disconnect struct{} + +// RateLimit is the data for a RateLimit event. +// This is a synthetic event and is not dispatched by Discord. +type RateLimit struct { + *TooManyRequests + URL string +} + +// Event provides a basic initial struct for all websocket events. +type Event struct { + Operation int `json:"op"` + Sequence int64 `json:"s"` + Type string `json:"t"` + RawData json.RawMessage `json:"d"` + // Struct contains one of the other types in this file. + Struct interface{} `json:"-"` +} + +// A Ready stores all data for the websocket READY event. +type Ready struct { + Version int `json:"v"` + SessionID string `json:"session_id"` + User *User `json:"user"` + ReadState []*ReadState `json:"read_state"` + PrivateChannels []*Channel `json:"private_channels"` + Guilds []*Guild `json:"guilds"` + + // Undocumented fields + Settings *Settings `json:"user_settings"` + UserGuildSettings []*UserGuildSettings `json:"user_guild_settings"` + Relationships []*Relationship `json:"relationships"` + Presences []*Presence `json:"presences"` + Notes map[string]string `json:"notes"` +} + +// ChannelCreate is the data for a ChannelCreate event. +type ChannelCreate struct { + *Channel +} + +// ChannelUpdate is the data for a ChannelUpdate event. +type ChannelUpdate struct { + *Channel +} + +// ChannelDelete is the data for a ChannelDelete event. +type ChannelDelete struct { + *Channel +} + +// ChannelPinsUpdate stores data for a ChannelPinsUpdate event. +type ChannelPinsUpdate struct { + LastPinTimestamp string `json:"last_pin_timestamp"` + ChannelID string `json:"channel_id"` +} + +// GuildCreate is the data for a GuildCreate event. +type GuildCreate struct { + *Guild +} + +// GuildUpdate is the data for a GuildUpdate event. +type GuildUpdate struct { + *Guild +} + +// GuildDelete is the data for a GuildDelete event. +type GuildDelete struct { + *Guild +} +
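The generated handlers in eventhandlers.go all follow one triad: Type() names the gateway event, New() allocates the payload struct for the websocket layer to unmarshal into, and Handle() type-asserts the payload and invokes the user callback. The synthetic events above (Connect, Disconnect, RateLimit) omit New() since they are fabricated locally rather than decoded from the gateway, which is also why they are absent from the init() registration list. A condensed sketch of what the generator would emit for a hypothetical WIDGET_UPDATE event; WidgetUpdate is invented for illustration and is not part of DiscordGo:

package discordgo

// WidgetUpdate is a hypothetical event payload, invented for illustration.
type WidgetUpdate struct {
	GuildID string `json:"guild_id"`
}

const widgetUpdateEventType = "WIDGET_UPDATE"

// widgetUpdateEventHandler is the shape the generator would emit.
type widgetUpdateEventHandler func(*Session, *WidgetUpdate)

// Type matches the "t" field of the gateway payload.
func (eh widgetUpdateEventHandler) Type() string { return widgetUpdateEventType }

// New returns the struct the "d" field is unmarshaled into.
func (eh widgetUpdateEventHandler) New() interface{} { return &WidgetUpdate{} }

// Handle type-asserts the decoded payload and invokes the user callback.
func (eh widgetUpdateEventHandler) Handle(s *Session, i interface{}) {
	if t, ok := i.(*WidgetUpdate); ok {
		eh(s, t)
	}
}

func init() {
	// Registering a nil func is enough here: only Type() and New() are used
	// on the decode path; Handle is only reached via AddHandler.
	registerInterfaceProvider(widgetUpdateEventHandler(nil))
}

+// GuildBanAdd is the data for a GuildBanAdd event.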
+type GuildBanAdd struct { + User *User `json:"user"` + GuildID string `json:"guild_id"` +} + +// GuildBanRemove is the data for a GuildBanRemove event. +type GuildBanRemove struct { + User *User `json:"user"` + GuildID string `json:"guild_id"` +} + +// GuildMemberAdd is the data for a GuildMemberAdd event. +type GuildMemberAdd struct { + *Member +} + +// GuildMemberUpdate is the data for a GuildMemberUpdate event. +type GuildMemberUpdate struct { + *Member +} + +// GuildMemberRemove is the data for a GuildMemberRemove event. +type GuildMemberRemove struct { + *Member +} + +// GuildRoleCreate is the data for a GuildRoleCreate event. +type GuildRoleCreate struct { + *GuildRole +} + +// GuildRoleUpdate is the data for a GuildRoleUpdate event. +type GuildRoleUpdate struct { + *GuildRole +} + +// A GuildRoleDelete is the data for a GuildRoleDelete event. +type GuildRoleDelete struct { + RoleID string `json:"role_id"` + GuildID string `json:"guild_id"` +} + +// A GuildEmojisUpdate is the data for a guild emoji update event. +type GuildEmojisUpdate struct { + GuildID string `json:"guild_id"` + Emojis []*Emoji `json:"emojis"` +} + +// A GuildMembersChunk is the data for a GuildMembersChunk event. +type GuildMembersChunk struct { + GuildID string `json:"guild_id"` + Members []*Member `json:"members"` +} + +// GuildIntegrationsUpdate is the data for a GuildIntegrationsUpdate event. +type GuildIntegrationsUpdate struct { + GuildID string `json:"guild_id"` +} + +// MessageAck is the data for a MessageAck event. +type MessageAck struct { + MessageID string `json:"message_id"` + ChannelID string `json:"channel_id"` +} + +// MessageCreate is the data for a MessageCreate event. +type MessageCreate struct { + *Message +} + +// MessageUpdate is the data for a MessageUpdate event. +type MessageUpdate struct { + *Message +} + +// MessageDelete is the data for a MessageDelete event. +type MessageDelete struct { + *Message +} + +// MessageReactionAdd is the data for a MessageReactionAdd event. +type MessageReactionAdd struct { + *MessageReaction +} + +// MessageReactionRemove is the data for a MessageReactionRemove event. +type MessageReactionRemove struct { + *MessageReaction +} + +// MessageReactionRemoveAll is the data for a MessageReactionRemoveAll event. +type MessageReactionRemoveAll struct { + *MessageReaction +} + +// PresencesReplace is the data for a PresencesReplace event. +type PresencesReplace []*Presence + +// PresenceUpdate is the data for a PresenceUpdate event. +type PresenceUpdate struct { + Presence + GuildID string `json:"guild_id"` + Roles []string `json:"roles"` +} + +// Resumed is the data for a Resumed event. +type Resumed struct { + Trace []string `json:"_trace"` +} + +// RelationshipAdd is the data for a RelationshipAdd event. +type RelationshipAdd struct { + *Relationship +} + +// RelationshipRemove is the data for a RelationshipRemove event. +type RelationshipRemove struct { + *Relationship +} + +// TypingStart is the data for a TypingStart event. +type TypingStart struct { + UserID string `json:"user_id"` + ChannelID string `json:"channel_id"` + Timestamp int `json:"timestamp"` +} + +// UserUpdate is the data for a UserUpdate event. +type UserUpdate struct { + *User +} + +// UserSettingsUpdate is the data for a UserSettingsUpdate event. +type UserSettingsUpdate map[string]interface{} + +// UserGuildSettingsUpdate is the data for a UserGuildSettingsUpdate event. 
+type UserGuildSettingsUpdate struct { + *UserGuildSettings +} + +// UserNoteUpdate is the data for a UserNoteUpdate event. +type UserNoteUpdate struct { + ID string `json:"id"` + Note string `json:"note"` +} + +// VoiceServerUpdate is the data for a VoiceServerUpdate event. +type VoiceServerUpdate struct { + Token string `json:"token"` + GuildID string `json:"guild_id"` + Endpoint string `json:"endpoint"` +} + +// VoiceStateUpdate is the data for a VoiceStateUpdate event. +type VoiceStateUpdate struct { + *VoiceState +} + +// MessageDeleteBulk is the data for a MessageDeleteBulk event. +type MessageDeleteBulk struct { + Messages []string `json:"ids"` + ChannelID string `json:"channel_id"` +} diff --git a/vendor/github.com/bwmarrin/discordgo/logging.go b/vendor/github.com/bwmarrin/discordgo/logging.go new file mode 100644 index 0000000..70d78d6 --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/logging.go @@ -0,0 +1,95 @@ +// Discordgo - Discord bindings for Go +// Available at https://github.com/bwmarrin/discordgo + +// Copyright 2015-2016 Bruce Marriner. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains code related to discordgo package logging + +package discordgo + +import ( + "fmt" + "log" + "runtime" + "strings" +) + +const ( + + // LogError level is used for critical errors that could lead to data loss + // or panic that would not be returned to a calling function. + LogError int = iota + + // LogWarning level is used for very abnormal events and errors that are + // also returned to a calling function. + LogWarning + + // LogInformational level is used for normal non-error activity. + LogInformational + + // LogDebug level is for very detailed non-error activity. This is + // very spammy and will impact performance. + LogDebug +) + +// msglog provides package wide logging consistency for discordgo; +// the format and a... arguments follow those of fmt.Printf +// msgL : LogLevel of the message +// caller : 1 + the number of callers away from the message source +// format : Printf style message format +// a ... : comma separated list of values to pass +func msglog(msgL, caller int, format string, a ...interface{}) { + + pc, file, line, _ := runtime.Caller(caller) + + files := strings.Split(file, "/") + file = files[len(files)-1] + + name := runtime.FuncForPC(pc).Name() + fns := strings.Split(name, ".") + name = fns[len(fns)-1] + + msg := fmt.Sprintf(format, a...) + + log.Printf("[DG%d] %s:%d:%s() %s\n", msgL, file, line, name, msg) +} + +// helper function that wraps msglog for the Session struct. +// This adds a check to ensure the message is only logged +// if the session log level is equal or higher than the +// message log level +func (s *Session) log(msgL int, format string, a ...interface{}) { + + if msgL > s.LogLevel { + return + } + + msglog(msgL, 2, format, a...) +} + +// helper function that wraps msglog for the VoiceConnection struct. +// This adds a check to ensure the message is only logged +// if the voice connection log level is equal or higher than the +// message log level +func (v *VoiceConnection) log(msgL int, format string, a ...interface{}) { + + if msgL > v.LogLevel { + return + } + + msglog(msgL, 2, format, a...) +} +
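A short sketch of what these levels mean for a consumer; the token is a placeholder and the sample output line is illustrative, not captured from a real run:

package main

import "github.com/bwmarrin/discordgo"

func main() {
	dg, err := discordgo.New("Bot " + "placeholder-token")
	if err != nil {
		panic(err)
	}

	// Raising LogLevel makes Session.log forward more messages to msglog,
	// which prefixes each line with "[DG<level>] file:line:funcname()",
	// producing lines of the shape:
	//   [DG3] wsapi.go:181:onEvent() Op: 0, Seq: 2, Type: READY
	dg.LogLevel = discordgo.LogDebug
}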
+// printJSON is a helper function to display JSON data in an easy to read format. +/* NOT USED ATM +func printJSON(body []byte) { + var prettyJSON bytes.Buffer + error := json.Indent(&prettyJSON, body, "", "\t") + if error != nil { + log.Print("JSON parse error: ", error) + } + log.Println(string(prettyJSON.Bytes())) +} +*/ diff --git a/vendor/github.com/bwmarrin/discordgo/message.go b/vendor/github.com/bwmarrin/discordgo/message.go new file mode 100644 index 0000000..13c2da0 --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/message.go @@ -0,0 +1,180 @@ +// Discordgo - Discord bindings for Go +// Available at https://github.com/bwmarrin/discordgo + +// Copyright 2015-2016 Bruce Marriner. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains code related to the Message struct + +package discordgo + +import ( + "fmt" + "io" + "regexp" +) + +// A Message stores all data related to a specific Discord message. +type Message struct { + ID string `json:"id"` + ChannelID string `json:"channel_id"` + Content string `json:"content"` + Timestamp Timestamp `json:"timestamp"` + EditedTimestamp Timestamp `json:"edited_timestamp"` + MentionRoles []string `json:"mention_roles"` + Tts bool `json:"tts"` + MentionEveryone bool `json:"mention_everyone"` + Author *User `json:"author"` + Attachments []*MessageAttachment `json:"attachments"` + Embeds []*MessageEmbed `json:"embeds"` + Mentions []*User `json:"mentions"` + Reactions []*MessageReactions `json:"reactions"` +} + +// File stores info about files you send in messages, e.g. as attachments. +type File struct { + Name string + Reader io.Reader +} + +// MessageSend stores all parameters you can send with ChannelMessageSendComplex. +type MessageSend struct { + Content string `json:"content,omitempty"` + Embed *MessageEmbed `json:"embed,omitempty"` + Tts bool `json:"tts"` + File *File `json:"file"` +} + +// MessageEdit is used to chain parameters via ChannelMessageEditComplex, which +// is also where you should get the instance from. +type MessageEdit struct { + Content *string `json:"content,omitempty"` + Embed *MessageEmbed `json:"embed,omitempty"` + + ID string + Channel string +} + +// NewMessageEdit returns a MessageEdit struct, initialized +// with the Channel and ID. +func NewMessageEdit(channelID string, messageID string) *MessageEdit { + return &MessageEdit{ + Channel: channelID, + ID: messageID, + } +} + +// SetContent is the same as setting the variable Content, +// except it doesn't take a pointer. +func (m *MessageEdit) SetContent(str string) *MessageEdit { + m.Content = &str + return m +} + +// SetEmbed is a convenience function for setting the embed, +// so you can chain commands. +func (m *MessageEdit) SetEmbed(embed *MessageEmbed) *MessageEdit { + m.Embed = embed + return m +} + +// A MessageAttachment stores data for message attachments. +type MessageAttachment struct { + ID string `json:"id"` + URL string `json:"url"` + ProxyURL string `json:"proxy_url"` + Filename string `json:"filename"` + Width int `json:"width"` + Height int `json:"height"` + Size int `json:"size"` +} + +// MessageEmbedFooter is a part of a MessageEmbed struct. +type MessageEmbedFooter struct { + Text string `json:"text,omitempty"` + IconURL string `json:"icon_url,omitempty"` + ProxyIconURL string `json:"proxy_icon_url,omitempty"` +} + +// MessageEmbedImage is a part of a MessageEmbed struct.
+type MessageEmbedImage struct { + URL string `json:"url,omitempty"` + ProxyURL string `json:"proxy_url,omitempty"` + Width int `json:"width,omitempty"` + Height int `json:"height,omitempty"` +} + +// MessageEmbedThumbnail is a part of a MessageEmbed struct. +type MessageEmbedThumbnail struct { + URL string `json:"url,omitempty"` + ProxyURL string `json:"proxy_url,omitempty"` + Width int `json:"width,omitempty"` + Height int `json:"height,omitempty"` +} + +// MessageEmbedVideo is a part of a MessageEmbed struct. +type MessageEmbedVideo struct { + URL string `json:"url,omitempty"` + ProxyURL string `json:"proxy_url,omitempty"` + Width int `json:"width,omitempty"` + Height int `json:"height,omitempty"` +} + +// MessageEmbedProvider is a part of a MessageEmbed struct. +type MessageEmbedProvider struct { + URL string `json:"url,omitempty"` + Name string `json:"name,omitempty"` +} + +// MessageEmbedAuthor is a part of a MessageEmbed struct. +type MessageEmbedAuthor struct { + URL string `json:"url,omitempty"` + Name string `json:"name,omitempty"` + IconURL string `json:"icon_url,omitempty"` + ProxyIconURL string `json:"proxy_icon_url,omitempty"` +} + +// MessageEmbedField is a part of a MessageEmbed struct. +type MessageEmbedField struct { + Name string `json:"name,omitempty"` + Value string `json:"value,omitempty"` + Inline bool `json:"inline,omitempty"` +} + +// A MessageEmbed stores data for message embeds. +type MessageEmbed struct { + URL string `json:"url,omitempty"` + Type string `json:"type,omitempty"` + Title string `json:"title,omitempty"` + Description string `json:"description,omitempty"` + Timestamp string `json:"timestamp,omitempty"` + Color int `json:"color,omitempty"` + Footer *MessageEmbedFooter `json:"footer,omitempty"` + Image *MessageEmbedImage `json:"image,omitempty"` + Thumbnail *MessageEmbedThumbnail `json:"thumbnail,omitempty"` + Video *MessageEmbedVideo `json:"video,omitempty"` + Provider *MessageEmbedProvider `json:"provider,omitempty"` + Author *MessageEmbedAuthor `json:"author,omitempty"` + Fields []*MessageEmbedField `json:"fields,omitempty"` +} + +// MessageReactions holds a reactions object for a message. +type MessageReactions struct { + Count int `json:"count"` + Me bool `json:"me"` + Emoji *Emoji `json:"emoji"` +} + +// ContentWithMentionsReplaced will replace all @ mentions with the +// username of the mention. +func (m *Message) ContentWithMentionsReplaced() string { + if m.Mentions == nil { + return m.Content + } + content := m.Content + for _, user := range m.Mentions { + content = regexp.MustCompile(fmt.Sprintf("<@!?(%s)>", user.ID)).ReplaceAllString(content, "@"+user.Username) + } + return content +}
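The regexp built by ContentWithMentionsReplaced matches both mention forms Discord emits, the plain <@ID> and the nickname variant <@!ID>. A small self-contained demonstration; the ID and username are invented:

package main

import (
	"fmt"

	"github.com/bwmarrin/discordgo"
)

func main() {
	m := &discordgo.Message{
		Content: "hi <@123> and <@!123>",
		Mentions: []*discordgo.User{
			{ID: "123", Username: "example"},
		},
	}

	// Both mention forms collapse to "@example".
	fmt.Println(m.ContentWithMentionsReplaced()) // hi @example and @example
}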
diff --git a/vendor/github.com/bwmarrin/discordgo/oauth2.go b/vendor/github.com/bwmarrin/discordgo/oauth2.go new file mode 100644 index 0000000..108b32f --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/oauth2.go @@ -0,0 +1,126 @@ +// Discordgo - Discord bindings for Go +// Available at https://github.com/bwmarrin/discordgo + +// Copyright 2015-2016 Bruce Marriner. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains functions related to Discord OAuth2 endpoints + +package discordgo + +// ------------------------------------------------------------------------------------------------ +// Code specific to Discord OAuth2 Applications +// ------------------------------------------------------------------------------------------------ + +// An Application struct stores values for a Discord OAuth2 Application +type Application struct { + ID string `json:"id,omitempty"` + Name string `json:"name"` + Description string `json:"description,omitempty"` + Icon string `json:"icon,omitempty"` + Secret string `json:"secret,omitempty"` + RedirectURIs *[]string `json:"redirect_uris,omitempty"` + BotRequireCodeGrant bool `json:"bot_require_code_grant,omitempty"` + BotPublic bool `json:"bot_public,omitempty"` + RPCApplicationState int `json:"rpc_application_state,omitempty"` + Flags int `json:"flags,omitempty"` + Owner *User `json:"owner"` + Bot *User `json:"bot"` +} + +// Application returns an Application structure of a specific Application +// appID : The ID of an Application +func (s *Session) Application(appID string) (st *Application, err error) { + + body, err := s.RequestWithBucketID("GET", EndpointApplication(appID), nil, EndpointApplication("")) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + +// Applications returns all applications for the authenticated user +func (s *Session) Applications() (st []*Application, err error) { + + body, err := s.RequestWithBucketID("GET", EndpointApplications, nil, EndpointApplications) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + +// ApplicationCreate creates a new Application +// ap : A pointer to an Application struct; its Name, Description and +// RedirectURIs (optional) are sent to Discord +func (s *Session) ApplicationCreate(ap *Application) (st *Application, err error) { + + data := struct { + Name string `json:"name"` + Description string `json:"description"` + RedirectURIs *[]string `json:"redirect_uris,omitempty"` + }{ap.Name, ap.Description, ap.RedirectURIs} + + body, err := s.RequestWithBucketID("POST", EndpointApplications, data, EndpointApplications) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + +// ApplicationUpdate updates an existing Application +// appID : The ID of an Application +// ap : A pointer to an Application struct holding the updated values +func (s *Session) ApplicationUpdate(appID string, ap *Application) (st *Application, err error) { + + data := struct { + Name string `json:"name"` + Description string `json:"description"` + RedirectURIs *[]string `json:"redirect_uris,omitempty"` + }{ap.Name, ap.Description, ap.RedirectURIs} + + body, err := s.RequestWithBucketID("PUT", EndpointApplication(appID), data, EndpointApplication("")) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + +// ApplicationDelete deletes an existing Application +// appID : The ID of an Application +func (s *Session) ApplicationDelete(appID string) (err error) { + + _, err = s.RequestWithBucketID("DELETE", EndpointApplication(appID), nil, EndpointApplication("")) + if err != nil { + return + } + + return +} +
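Taken together these helpers form a small CRUD surface over OAuth2 applications. A sketch, assuming a token in the DISCORD_TOKEN environment variable with permission to manage applications (which kind of token Discord requires here is not asserted):

package main

import (
	"log"
	"os"

	"github.com/bwmarrin/discordgo"
)

func main() {
	dg, err := discordgo.New(os.Getenv("DISCORD_TOKEN"))
	if err != nil {
		log.Fatal(err)
	}

	// Create, rename, then clean up an application.
	app, err := dg.ApplicationCreate(&discordgo.Application{
		Name:        "demo-app",
		Description: "created from the API",
	})
	if err != nil {
		log.Fatal(err)
	}

	app.Name = "demo-app-renamed"
	if _, err := dg.ApplicationUpdate(app.ID, app); err != nil {
		log.Fatal(err)
	}

	if err := dg.ApplicationDelete(app.ID); err != nil {
		log.Fatal(err)
	}
}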
+// ------------------------------------------------------------------------------------------------ +// Code specific to Discord OAuth2 Application Bots +// ------------------------------------------------------------------------------------------------ + +// ApplicationBotCreate creates an Application Bot Account +// +// appID : The ID of an Application +// +// NOTE: func name may change, if I can think up something better. +func (s *Session) ApplicationBotCreate(appID string) (st *User, err error) { + + body, err := s.RequestWithBucketID("POST", EndpointApplicationsBot(appID), nil, EndpointApplicationsBot("")) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} diff --git a/vendor/github.com/bwmarrin/discordgo/ratelimit.go b/vendor/github.com/bwmarrin/discordgo/ratelimit.go new file mode 100644 index 0000000..876e98a --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/ratelimit.go @@ -0,0 +1,144 @@ +package discordgo + +import ( + "net/http" + "strconv" + "sync" + "sync/atomic" + "time" +) + +// RateLimiter holds all ratelimit buckets +type RateLimiter struct { + sync.Mutex + global *int64 + buckets map[string]*Bucket + globalRateLimit time.Duration +} + +// NewRatelimiter returns a new RateLimiter +func NewRatelimiter() *RateLimiter { + + return &RateLimiter{ + buckets: make(map[string]*Bucket), + global: new(int64), + } +} + +// getBucket retrieves or creates a bucket +func (r *RateLimiter) getBucket(key string) *Bucket { + r.Lock() + defer r.Unlock() + + if bucket, ok := r.buckets[key]; ok { + return bucket + } + + b := &Bucket{ + remaining: 1, + Key: key, + global: r.global, + } + + r.buckets[key] = b + return b +} + +// LockBucket locks until a request can be made +func (r *RateLimiter) LockBucket(bucketID string) *Bucket { + + b := r.getBucket(bucketID) + + b.Lock() + + // If we ran out of calls and the reset time is still ahead of us + // then we need to take it easy and relax a little + if b.remaining < 1 && b.reset.After(time.Now()) { + time.Sleep(b.reset.Sub(time.Now())) + + } + + // Check for global ratelimits + sleepTo := time.Unix(0, atomic.LoadInt64(r.global)) + if now := time.Now(); now.Before(sleepTo) { + time.Sleep(sleepTo.Sub(now)) + } + + b.remaining-- + return b +} + +// Bucket represents a ratelimit bucket. Each bucket gets ratelimited +// individually, except for global ratelimits, which affect all buckets. +type Bucket struct { + sync.Mutex + Key string + remaining int + limit int + reset time.Time + global *int64 +} + +// Release unlocks the bucket and reads the headers to update the bucket's ratelimit info, +// and locks all buckets in case there's a global ratelimit. +func (b *Bucket) Release(headers http.Header) error { + + defer b.Unlock() + if headers == nil { + return nil + } + + remaining := headers.Get("X-RateLimit-Remaining") + reset := headers.Get("X-RateLimit-Reset") + global := headers.Get("X-RateLimit-Global") + retryAfter := headers.Get("Retry-After") + + // Update global and per bucket reset time if the proper headers are available + // If global is set, then it will block all buckets until after Retry-After + // If Retry-After without global is provided it will use that for the new reset + // time since it's more accurate than X-RateLimit-Reset.
+	// If Retry-After is not provided, it will update the reset time from X-RateLimit-Reset
+	if retryAfter != "" {
+		parsedAfter, err := strconv.ParseInt(retryAfter, 10, 64)
+		if err != nil {
+			return err
+		}
+
+		resetAt := time.Now().Add(time.Duration(parsedAfter) * time.Millisecond)
+
+		// Lock either this single bucket or all buckets
+		if global != "" {
+			atomic.StoreInt64(b.global, resetAt.UnixNano())
+		} else {
+			b.reset = resetAt
+		}
+	} else if reset != "" {
+		// Calculate the reset time by using the date header returned from discord
+		discordTime, err := http.ParseTime(headers.Get("Date"))
+		if err != nil {
+			return err
+		}
+
+		unix, err := strconv.ParseInt(reset, 10, 64)
+		if err != nil {
+			return err
+		}
+
+		// Calculate the time until reset and add it to the current local time
+		// some extra time is added because without it I still encountered 429's.
+		// The added amount is the lowest amount that gave no 429's
+		// in 1k requests
+		delta := time.Unix(unix, 0).Sub(discordTime) + time.Millisecond*250
+		b.reset = time.Now().Add(delta)
+	}
+
+	// Update remaining if header is present
+	if remaining != "" {
+		parsedRemaining, err := strconv.ParseInt(remaining, 10, 32)
+		if err != nil {
+			return err
+		}
+		b.remaining = int(parsedRemaining)
+	}
+
+	return nil
+}
diff --git a/vendor/github.com/bwmarrin/discordgo/restapi.go b/vendor/github.com/bwmarrin/discordgo/restapi.go
new file mode 100644
index 0000000..cb482e6
--- /dev/null
+++ b/vendor/github.com/bwmarrin/discordgo/restapi.go
@@ -0,0 +1,1983 @@
+// Discordgo - Discord bindings for Go
+// Available at https://github.com/bwmarrin/discordgo
+
+// Copyright 2015-2016 Bruce Marriner. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains functions for interacting with the Discord REST/JSON API
+// at the lowest level.
+
+package discordgo
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"image"
+	_ "image/jpeg" // For JPEG decoding
+	_ "image/png"  // For PNG decoding
+	"io"
+	"io/ioutil"
+	"log"
+	"mime/multipart"
+	"net/http"
+	"net/url"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// All error constants
+var (
+	ErrJSONUnmarshal           = errors.New("json unmarshal")
+	ErrStatusOffline           = errors.New("You can't set your Status to offline")
+	ErrVerificationLevelBounds = errors.New("VerificationLevel out of bounds, should be between 0 and 3")
+	ErrPruneDaysBounds         = errors.New("the number of days should be more than or equal to 1")
+	ErrGuildNoIcon             = errors.New("guild does not have an icon set")
+	ErrGuildNoSplash           = errors.New("guild does not have a splash set")
+)
+
+// Request is the same as RequestWithBucketID but the bucket id is the same as the urlStr
+func (s *Session) Request(method, urlStr string, data interface{}) (response []byte, err error) {
+	return s.RequestWithBucketID(method, urlStr, data, strings.SplitN(urlStr, "?", 2)[0])
+}
+
+// RequestWithBucketID makes a (GET/POST/...) request to the Discord REST API with JSON data.
+func (s *Session) RequestWithBucketID(method, urlStr string, data interface{}, bucketID string) (response []byte, err error) {
+	var body []byte
+	if data != nil {
+		body, err = json.Marshal(data)
+		if err != nil {
+			return
+		}
+	}
+
+	return s.request(method, urlStr, "application/json", body, bucketID, 0)
+}
+
+// request makes a (GET/POST/...) request to the Discord REST API.
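+// Every call is serialized through the session ratelimiter: the bucket for
+// bucketID is locked before the HTTP request and released with the response
+// headers afterwards, so the bucket can learn its new remaining/reset values.
+// In sketch form (illustrative of the flow below, not a separate API):
+//
+//	bucket := s.ratelimiter.LockBucket(bucketID) // may sleep until the bucket resets
+//	resp, err := s.Client.Do(req)
+//	bucket.Release(resp.Header) // parses the X-RateLimit-* / Retry-After headers
+//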
+// Sequence is the sequence number, if it fails with a 502 it will +// retry with sequence+1 until it either succeeds or sequence >= session.MaxRestRetries +func (s *Session) request(method, urlStr, contentType string, b []byte, bucketID string, sequence int) (response []byte, err error) { + if bucketID == "" { + bucketID = strings.SplitN(urlStr, "?", 2)[0] + } + + bucket := s.ratelimiter.LockBucket(bucketID) + + if s.Debug { + log.Printf("API REQUEST %8s :: %s\n", method, urlStr) + log.Printf("API REQUEST PAYLOAD :: [%s]\n", string(b)) + } + + req, err := http.NewRequest(method, urlStr, bytes.NewBuffer(b)) + if err != nil { + bucket.Release(nil) + return + } + + // Not used on initial login.. + // TODO: Verify if a login, otherwise complain about no-token + if s.Token != "" { + req.Header.Set("authorization", s.Token) + } + + req.Header.Set("Content-Type", contentType) + // TODO: Make a configurable static variable. + req.Header.Set("User-Agent", fmt.Sprintf("DiscordBot (https://github.com/bwmarrin/discordgo, v%s)", VERSION)) + + if s.Debug { + for k, v := range req.Header { + log.Printf("API REQUEST HEADER :: [%s] = %+v\n", k, v) + } + } + + resp, err := s.Client.Do(req) + if err != nil { + bucket.Release(nil) + return + } + defer func() { + err2 := resp.Body.Close() + if err2 != nil { + log.Println("error closing resp body") + } + }() + + err = bucket.Release(resp.Header) + if err != nil { + return + } + + response, err = ioutil.ReadAll(resp.Body) + if err != nil { + return + } + + if s.Debug { + + log.Printf("API RESPONSE STATUS :: %s\n", resp.Status) + for k, v := range resp.Header { + log.Printf("API RESPONSE HEADER :: [%s] = %+v\n", k, v) + } + log.Printf("API RESPONSE BODY :: [%s]\n\n\n", response) + } + + switch resp.StatusCode { + + case http.StatusOK: + case http.StatusCreated: + case http.StatusNoContent: + + // TODO check for 401 response, invalidate token if we get one. + + case http.StatusBadGateway: + // Retry sending request if possible + if sequence < s.MaxRestRetries { + + s.log(LogInformational, "%s Failed (%s), Retrying...", urlStr, resp.Status) + response, err = s.request(method, urlStr, contentType, b, bucketID, sequence+1) + } else { + err = fmt.Errorf("Exceeded Max retries HTTP %s, %s", resp.Status, response) + } + + case 429: // TOO MANY REQUESTS - Rate limiting + rl := TooManyRequests{} + err = json.Unmarshal(response, &rl) + if err != nil { + s.log(LogError, "rate limit unmarshal error, %s", err) + return + } + s.log(LogInformational, "Rate Limiting %s, retry in %d", urlStr, rl.RetryAfter) + s.handleEvent(rateLimitEventType, RateLimit{TooManyRequests: &rl, URL: urlStr}) + + time.Sleep(rl.RetryAfter * time.Millisecond) + // we can make the above smarter + // this method can cause longer delays than required + + response, err = s.request(method, urlStr, contentType, b, bucketID, sequence) + + default: // Error condition + err = newRestError(req, resp, response) + } + + return +} + +func unmarshal(data []byte, v interface{}) error { + err := json.Unmarshal(data, v) + if err != nil { + return ErrJSONUnmarshal + } + + return nil +} + +// ------------------------------------------------------------------------------------------------ +// Functions specific to Discord Sessions +// ------------------------------------------------------------------------------------------------ + +// Login asks the Discord server for an authentication token. +// +// NOTE: While email/pass authentication is supported by DiscordGo it is +// HIGHLY DISCOURAGED by Discord. 
Please only use email/pass to obtain a token
+// and then use that authentication token for all future connections.
+// Also, doing any form of automation with a user (non Bot) account may result
+// in that account being permanently banned from Discord.
+func (s *Session) Login(email, password string) (err error) {
+
+	data := struct {
+		Email    string `json:"email"`
+		Password string `json:"password"`
+	}{email, password}
+
+	response, err := s.RequestWithBucketID("POST", EndpointLogin, data, EndpointLogin)
+	if err != nil {
+		return
+	}
+
+	temp := struct {
+		Token string `json:"token"`
+		MFA   bool   `json:"mfa"`
+	}{}
+
+	err = unmarshal(response, &temp)
+	if err != nil {
+		return
+	}
+
+	s.Token = temp.Token
+	s.MFA = temp.MFA
+	return
+}
+
+// Register sends a Register request to Discord, and returns the authentication token.
+// Note that this account is temporary and should be verified for future use.
+// Another option is to save the authentication token externally, but this isn't recommended.
+func (s *Session) Register(username string) (token string, err error) {
+
+	data := struct {
+		Username string `json:"username"`
+	}{username}
+
+	response, err := s.RequestWithBucketID("POST", EndpointRegister, data, EndpointRegister)
+	if err != nil {
+		return
+	}
+
+	temp := struct {
+		Token string `json:"token"`
+	}{}
+
+	err = unmarshal(response, &temp)
+	if err != nil {
+		return
+	}
+
+	token = temp.Token
+	return
+}
+
+// Logout sends a logout request to Discord.
+// This does not seem to actually invalidate the token. So you can still
+// make API calls even after a Logout. So it seems almost pointless to
+// even use.
+func (s *Session) Logout() (err error) {
+
+	//  _, err = s.Request("POST", LOGOUT, fmt.Sprintf(`{"token": "%s"}`, s.Token))
+
+	if s.Token == "" {
+		return
+	}
+
+	data := struct {
+		Token string `json:"token"`
+	}{s.Token}
+
+	_, err = s.RequestWithBucketID("POST", EndpointLogout, data, EndpointLogout)
+	return
+}
+
+// ------------------------------------------------------------------------------------------------
+// Functions specific to Discord Users
+// ------------------------------------------------------------------------------------------------
+
+// User returns the user details of the given userID
+// userID : A user ID or "@me" which is a shortcut of current user ID
+func (s *Session) User(userID string) (st *User, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointUser(userID), nil, EndpointUsers)
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// UserAvatar is deprecated. Please use UserAvatarDecode
+// userID : A user ID or "@me" which is a shortcut of current user ID
+func (s *Session) UserAvatar(userID string) (img image.Image, err error) {
+	u, err := s.User(userID)
+	if err != nil {
+		return
+	}
+	img, err = s.UserAvatarDecode(u)
+	return
+}
+
+// UserAvatarDecode returns an image.Image of a user's Avatar
+// user : The user whose avatar should be retrieved
+func (s *Session) UserAvatarDecode(u *User) (img image.Image, err error) {
+	body, err := s.RequestWithBucketID("GET", EndpointUserAvatar(u.ID, u.Avatar), nil, EndpointUserAvatar("", ""))
+	if err != nil {
+		return
+	}
+
+	img, _, err = image.Decode(bytes.NewReader(body))
+	return
+}
+
+// UserUpdate updates a user's settings.
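+//
+// The avatar parameter accepts either the hash of an existing avatar or a
+// data URI such as "data:image/png;base64,..." for a new one (see the NOTE in
+// the function body). An illustrative call that only changes the username
+// (the credentials are placeholders):
+//
+//	me, err := session.UserUpdate("email@example.com", "password", "NewName", "", "")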
+func (s *Session) UserUpdate(email, password, username, avatar, newPassword string) (st *User, err error) {
+
+	// NOTE: Avatar must be either the hash/id of existing Avatar or
+	// data:image/png;base64,BASE64_STRING_OF_NEW_AVATAR_PNG
+	// to set a new avatar.
+	// If left blank, avatar will be set to null/blank
+
+	data := struct {
+		Email       string `json:"email"`
+		Password    string `json:"password"`
+		Username    string `json:"username,omitempty"`
+		Avatar      string `json:"avatar,omitempty"`
+		NewPassword string `json:"new_password,omitempty"`
+	}{email, password, username, avatar, newPassword}
+
+	body, err := s.RequestWithBucketID("PATCH", EndpointUser("@me"), data, EndpointUsers)
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// UserSettings returns the settings for a given user
+func (s *Session) UserSettings() (st *Settings, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointUserSettings("@me"), nil, EndpointUserSettings(""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// UserUpdateStatus updates the user status
+// status : The new status (valid statuses are 'online', 'idle', 'dnd' and 'invisible')
+func (s *Session) UserUpdateStatus(status Status) (st *Settings, err error) {
+	if status == StatusOffline {
+		err = ErrStatusOffline
+		return
+	}
+
+	data := struct {
+		Status Status `json:"status"`
+	}{status}
+
+	body, err := s.RequestWithBucketID("PATCH", EndpointUserSettings("@me"), data, EndpointUserSettings(""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// UserChannels returns an array of Channel structures for all private
+// channels.
+func (s *Session) UserChannels() (st []*Channel, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointUserChannels("@me"), nil, EndpointUserChannels(""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// UserChannelCreate creates a new User (Private) Channel with another User
+// recipientID : A user ID for the user with whom this channel is opened.
+func (s *Session) UserChannelCreate(recipientID string) (st *Channel, err error) {
+
+	data := struct {
+		RecipientID string `json:"recipient_id"`
+	}{recipientID}
+
+	body, err := s.RequestWithBucketID("POST", EndpointUserChannels("@me"), data, EndpointUserChannels(""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// UserGuilds returns an array of UserGuild structures for all guilds.
+// limit : The number of guilds that can be returned. (max 100)
+// beforeID : If provided all guilds returned will be before given ID.
+// afterID : If provided all guilds returned will be after given ID.
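+//
+// Pagination works like the other list endpoints; an illustrative sketch that
+// walks all guilds 100 at a time:
+//
+//	var after string
+//	for {
+//		page, err := session.UserGuilds(100, "", after)
+//		if err != nil || len(page) == 0 {
+//			break
+//		}
+//		after = page[len(page)-1].ID
+//	}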
+func (s *Session) UserGuilds(limit int, beforeID, afterID string) (st []*UserGuild, err error) {
+
+	v := url.Values{}
+
+	if limit > 0 {
+		v.Set("limit", strconv.Itoa(limit))
+	}
+	if afterID != "" {
+		v.Set("after", afterID)
+	}
+	if beforeID != "" {
+		v.Set("before", beforeID)
+	}
+
+	uri := EndpointUserGuilds("@me")
+
+	if len(v) > 0 {
+		uri = fmt.Sprintf("%s?%s", uri, v.Encode())
+	}
+
+	body, err := s.RequestWithBucketID("GET", uri, nil, EndpointUserGuilds(""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// UserGuildSettingsEdit edits the user's notification settings for a guild
+// guildID : The ID of the guild to edit the settings on
+// settings : The settings to update
+func (s *Session) UserGuildSettingsEdit(guildID string, settings *UserGuildSettingsEdit) (st *UserGuildSettings, err error) {
+
+	body, err := s.RequestWithBucketID("PATCH", EndpointUserGuildSettings("@me", guildID), settings, EndpointUserGuildSettings("", guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// UserChannelPermissions returns the permission of a user in a channel.
+// userID    : The ID of the user to calculate permissions for.
+// channelID : The ID of the channel to calculate permission for.
+//
+// NOTE: This function is now deprecated and will be removed in the future.
+// Please see the same function inside state.go
+func (s *Session) UserChannelPermissions(userID, channelID string) (apermissions int, err error) {
+	// Try to just get permissions from state.
+	apermissions, err = s.State.UserChannelPermissions(userID, channelID)
+	if err == nil {
+		return
+	}
+
+	// Otherwise try to get as much data from state as possible, falling back to the network.
+	channel, err := s.State.Channel(channelID)
+	if err != nil || channel == nil {
+		channel, err = s.Channel(channelID)
+		if err != nil {
+			return
+		}
+	}
+
+	guild, err := s.State.Guild(channel.GuildID)
+	if err != nil || guild == nil {
+		guild, err = s.Guild(channel.GuildID)
+		if err != nil {
+			return
+		}
+	}
+
+	if userID == guild.OwnerID {
+		apermissions = PermissionAll
+		return
+	}
+
+	member, err := s.State.Member(guild.ID, userID)
+	if err != nil || member == nil {
+		member, err = s.GuildMember(guild.ID, userID)
+		if err != nil {
+			return
+		}
+	}
+
+	return memberPermissions(guild, channel, member), nil
+}
+
+// memberPermissions calculates the permissions for a member.
+// https://support.discordapp.com/hc/en-us/articles/206141927-How-is-the-permission-hierarchy-structured-
+func memberPermissions(guild *Guild, channel *Channel, member *Member) (apermissions int) {
+	userID := member.User.ID
+
+	if userID == guild.OwnerID {
+		apermissions = PermissionAll
+		return
+	}
+
+	for _, role := range guild.Roles {
+		if role.ID == guild.ID {
+			apermissions |= role.Permissions
+			break
+		}
+	}
+
+	for _, role := range guild.Roles {
+		for _, roleID := range member.Roles {
+			if role.ID == roleID {
+				apermissions |= role.Permissions
+				break
+			}
+		}
+	}
+
+	if apermissions&PermissionAdministrator == PermissionAdministrator {
+		apermissions |= PermissionAll
+	}
+
+	// Apply @everyone overrides from the channel.
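+	// Each override is applied as "clear the denied bits, then set the allowed
+	// bits". For example, with apermissions = 0b0110, Deny = 0b0010 and
+	// Allow = 0b1000: 0b0110 &^ 0b0010 = 0b0100, and 0b0100 | 0b1000 = 0b1100.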
+	for _, overwrite := range channel.PermissionOverwrites {
+		if guild.ID == overwrite.ID {
+			apermissions &= ^overwrite.Deny
+			apermissions |= overwrite.Allow
+			break
+		}
+	}
+
+	denies := 0
+	allows := 0
+
+	// Member overwrites can override role overrides, so do two passes
+	for _, overwrite := range channel.PermissionOverwrites {
+		for _, roleID := range member.Roles {
+			if overwrite.Type == "role" && roleID == overwrite.ID {
+				denies |= overwrite.Deny
+				allows |= overwrite.Allow
+				break
+			}
+		}
+	}
+
+	apermissions &= ^denies
+	apermissions |= allows
+
+	for _, overwrite := range channel.PermissionOverwrites {
+		if overwrite.Type == "member" && overwrite.ID == userID {
+			apermissions &= ^overwrite.Deny
+			apermissions |= overwrite.Allow
+			break
+		}
+	}
+
+	if apermissions&PermissionAdministrator == PermissionAdministrator {
+		apermissions |= PermissionAllChannel
+	}
+
+	return apermissions
+}
+
+// ------------------------------------------------------------------------------------------------
+// Functions specific to Discord Guilds
+// ------------------------------------------------------------------------------------------------
+
+// Guild returns a Guild structure of a specific Guild.
+// guildID : The ID of a Guild
+func (s *Session) Guild(guildID string) (st *Guild, err error) {
+	if s.StateEnabled {
+		// Attempt to grab the guild from State first.
+		st, err = s.State.Guild(guildID)
+		if err == nil {
+			return
+		}
+	}
+
+	body, err := s.RequestWithBucketID("GET", EndpointGuild(guildID), nil, EndpointGuild(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// GuildCreate creates a new Guild
+// name : A name for the Guild (2-100 characters)
+func (s *Session) GuildCreate(name string) (st *Guild, err error) {
+
+	data := struct {
+		Name string `json:"name"`
+	}{name}
+
+	body, err := s.RequestWithBucketID("POST", EndpointGuilds, data, EndpointGuilds)
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// GuildEdit edits an existing Guild
+// guildID : The ID of a Guild
+// g : A GuildParams struct with the values Name, Region and VerificationLevel defined.
+func (s *Session) GuildEdit(guildID string, g GuildParams) (st *Guild, err error) {
+
+	// Bounds checking for VerificationLevel, interval: [0, 3]
+	if g.VerificationLevel != nil {
+		val := *g.VerificationLevel
+		if val < 0 || val > 3 {
+			err = ErrVerificationLevelBounds
+			return
+		}
+	}
+
+	// Bounds checking for regions
+	if g.Region != "" {
+		isValid := false
+		regions, _ := s.VoiceRegions()
+		for _, r := range regions {
+			if g.Region == r.ID {
+				isValid = true
+			}
+		}
+		if !isValid {
+			var valid []string
+			for _, r := range regions {
+				valid = append(valid, r.ID)
+			}
+			err = fmt.Errorf("Region not a valid region (%q)", valid)
+			return
+		}
+	}
+
+	body, err := s.RequestWithBucketID("PATCH", EndpointGuild(guildID), g, EndpointGuild(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// GuildDelete deletes a Guild.
+// guildID : The ID of a Guild
+func (s *Session) GuildDelete(guildID string) (st *Guild, err error) {
+
+	body, err := s.RequestWithBucketID("DELETE", EndpointGuild(guildID), nil, EndpointGuild(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// GuildLeave leaves a Guild.
+// guildID : The ID of a Guild
+func (s *Session) GuildLeave(guildID string) (err error) {
+
+	_, err = s.RequestWithBucketID("DELETE", EndpointUserGuild("@me", guildID), nil, EndpointUserGuild("", guildID))
+	return
+}
+
+// GuildBans returns an array of User structures for all bans of a
+// given guild.
+// guildID : The ID of a Guild.
+func (s *Session) GuildBans(guildID string) (st []*GuildBan, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointGuildBans(guildID), nil, EndpointGuildBans(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+
+	return
+}
+
+// GuildBanCreate bans the given user from the given guild.
+// guildID : The ID of a Guild.
+// userID : The ID of a User
+// days : The number of days of previous comments to delete.
+func (s *Session) GuildBanCreate(guildID, userID string, days int) (err error) {
+	return s.GuildBanCreateWithReason(guildID, userID, "", days)
+}
+
+// GuildBanCreateWithReason bans the given user from the given guild, also providing a reason.
+// guildID : The ID of a Guild.
+// userID : The ID of a User
+// reason : The reason for this ban
+// days : The number of days of previous comments to delete.
+func (s *Session) GuildBanCreateWithReason(guildID, userID, reason string, days int) (err error) {
+
+	uri := EndpointGuildBan(guildID, userID)
+
+	queryParams := url.Values{}
+	if days > 0 {
+		queryParams.Set("delete-message-days", strconv.Itoa(days))
+	}
+	if reason != "" {
+		queryParams.Set("reason", reason)
+	}
+
+	if len(queryParams) > 0 {
+		uri += "?" + queryParams.Encode()
+	}
+
+	_, err = s.RequestWithBucketID("PUT", uri, nil, EndpointGuildBan(guildID, ""))
+	return
+}
+
+// GuildBanDelete removes the given user from the guild bans
+// guildID : The ID of a Guild.
+// userID : The ID of a User
+func (s *Session) GuildBanDelete(guildID, userID string) (err error) {
+
+	_, err = s.RequestWithBucketID("DELETE", EndpointGuildBan(guildID, userID), nil, EndpointGuildBan(guildID, ""))
+	return
+}
+
+// GuildMembers returns a list of members for a guild.
+// guildID : The ID of a Guild.
+// after : The ID of the member to return members after
+// limit : Max number of members to return (max 1000)
+func (s *Session) GuildMembers(guildID string, after string, limit int) (st []*Member, err error) {
+
+	uri := EndpointGuildMembers(guildID)
+
+	v := url.Values{}
+
+	if after != "" {
+		v.Set("after", after)
+	}
+
+	if limit > 0 {
+		v.Set("limit", strconv.Itoa(limit))
+	}
+
+	if len(v) > 0 {
+		uri = fmt.Sprintf("%s?%s", uri, v.Encode())
+	}
+
+	body, err := s.RequestWithBucketID("GET", uri, nil, EndpointGuildMembers(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// GuildMember returns a member of a guild.
+// guildID : The ID of a Guild.
+// userID : The ID of a User
+func (s *Session) GuildMember(guildID, userID string) (st *Member, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointGuildMember(guildID, userID), nil, EndpointGuildMember(guildID, ""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// GuildMemberDelete removes the given user from the given guild.
+// guildID : The ID of a Guild.
+// userID : The ID of a User
+func (s *Session) GuildMemberDelete(guildID, userID string) (err error) {
+
+	_, err = s.RequestWithBucketID("DELETE", EndpointGuildMember(guildID, userID), nil, EndpointGuildMember(guildID, ""))
+	return
+}
+
+// GuildMemberEdit edits the roles of a member.
+// guildID : The ID of a Guild.
+// userID : The ID of a User.
+// roles : A list of role IDs to set on the member.
+func (s *Session) GuildMemberEdit(guildID, userID string, roles []string) (err error) {
+
+	data := struct {
+		Roles []string `json:"roles"`
+	}{roles}
+
+	_, err = s.RequestWithBucketID("PATCH", EndpointGuildMember(guildID, userID), data, EndpointGuildMember(guildID, ""))
+	if err != nil {
+		return
+	}
+
+	return
+}
+
+// GuildMemberMove moves a guild member from one voice channel to another/none
+// guildID : The ID of a Guild.
+// userID : The ID of a User.
+// channelID : The ID of a channel to move the user to, or null?
+// NOTE : I am not entirely set on the name of this function and it may change
+// prior to the final 1.0.0 release of Discordgo
+func (s *Session) GuildMemberMove(guildID, userID, channelID string) (err error) {
+
+	data := struct {
+		ChannelID string `json:"channel_id"`
+	}{channelID}
+
+	_, err = s.RequestWithBucketID("PATCH", EndpointGuildMember(guildID, userID), data, EndpointGuildMember(guildID, ""))
+	if err != nil {
+		return
+	}
+
+	return
+}
+
+// GuildMemberNickname updates the nickname of a guild member
+// guildID : The ID of a guild
+// userID : The ID of a user or "@me" which is a shortcut of the current user ID
+// nickname : The new nickname
+func (s *Session) GuildMemberNickname(guildID, userID, nickname string) (err error) {
+
+	data := struct {
+		Nick string `json:"nick"`
+	}{nickname}
+
+	if userID == "@me" {
+		userID += "/nick"
+	}
+
+	_, err = s.RequestWithBucketID("PATCH", EndpointGuildMember(guildID, userID), data, EndpointGuildMember(guildID, ""))
+	return
+}
+
+// GuildMemberRoleAdd adds the specified role to a given member
+// guildID : The ID of a Guild.
+// userID : The ID of a User.
+// roleID : The ID of a Role to be assigned to the user.
+func (s *Session) GuildMemberRoleAdd(guildID, userID, roleID string) (err error) {
+
+	_, err = s.RequestWithBucketID("PUT", EndpointGuildMemberRole(guildID, userID, roleID), nil, EndpointGuildMemberRole(guildID, "", ""))
+
+	return
+}
+
+// GuildMemberRoleRemove removes the specified role from a given member
+// guildID : The ID of a Guild.
+// userID : The ID of a User.
+// roleID : The ID of a Role to be removed from the user.
+func (s *Session) GuildMemberRoleRemove(guildID, userID, roleID string) (err error) {
+
+	_, err = s.RequestWithBucketID("DELETE", EndpointGuildMemberRole(guildID, userID, roleID), nil, EndpointGuildMemberRole(guildID, "", ""))
+
+	return
+}
+
+// GuildChannels returns an array of Channel structures for all channels of a
+// given guild.
+// guildID : The ID of a Guild.
+func (s *Session) GuildChannels(guildID string) (st []*Channel, err error) {
+
+	body, err := s.request("GET", EndpointGuildChannels(guildID), "", nil, EndpointGuildChannels(guildID), 0)
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+
+	return
+}
+
+// GuildChannelCreate creates a new channel in the given guild
+// guildID : The ID of a Guild.
+// name : Name of the channel (2-100 chars length)
+// ctype : Type of the channel (voice or text)
+func (s *Session) GuildChannelCreate(guildID, name, ctype string) (st *Channel, err error) {
+
+	data := struct {
+		Name string `json:"name"`
+		Type string `json:"type"`
+	}{name, ctype}
+
+	body, err := s.RequestWithBucketID("POST", EndpointGuildChannels(guildID), data, EndpointGuildChannels(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// GuildChannelsReorder updates the order of channels in a guild
+// guildID : The ID of a Guild.
+// channels : Updated channels.
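+//
+// Each channel's Position field determines the resulting order. An
+// illustrative sketch that swaps the first two channels of a guild (assumes
+// the guild has at least two channels):
+//
+//	channels, _ := session.GuildChannels(guildID)
+//	channels[0].Position, channels[1].Position = channels[1].Position, channels[0].Position
+//	err := session.GuildChannelsReorder(guildID, channels[:2])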
+func (s *Session) GuildChannelsReorder(guildID string, channels []*Channel) (err error) {
+
+	_, err = s.RequestWithBucketID("PATCH", EndpointGuildChannels(guildID), channels, EndpointGuildChannels(guildID))
+	return
+}
+
+// GuildInvites returns an array of Invite structures for the given guild
+// guildID : The ID of a Guild.
+func (s *Session) GuildInvites(guildID string) (st []*Invite, err error) {
+	body, err := s.RequestWithBucketID("GET", EndpointGuildInvites(guildID), nil, EndpointGuildInivtes(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// GuildRoles returns all roles for a given guild.
+// guildID : The ID of a Guild.
+func (s *Session) GuildRoles(guildID string) (st []*Role, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointGuildRoles(guildID), nil, EndpointGuildRoles(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+
+	return // TODO return pointer
+}
+
+// GuildRoleCreate returns a new Guild Role.
+// guildID : The ID of a Guild.
+func (s *Session) GuildRoleCreate(guildID string) (st *Role, err error) {
+
+	body, err := s.RequestWithBucketID("POST", EndpointGuildRoles(guildID), nil, EndpointGuildRoles(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+
+	return
+}
+
+// GuildRoleEdit updates an existing Guild Role with new values
+// guildID : The ID of a Guild.
+// roleID : The ID of a Role.
+// name : The name of the Role.
+// color : The color of the role (decimal, not hex).
+// hoist : Whether to display the role's users separately.
+// perm : The permissions for the role.
+// mention : Whether this role is mentionable
+func (s *Session) GuildRoleEdit(guildID, roleID, name string, color int, hoist bool, perm int, mention bool) (st *Role, err error) {
+
+	// Prevent sending a color int that is too big.
+	if color > 0xFFFFFF {
+		err = fmt.Errorf("color value cannot be larger than 0xFFFFFF")
+		return
+	}
+
+	data := struct {
+		Name        string `json:"name"`        // The role's name (overwrites existing)
+		Color       int    `json:"color"`       // The color the role should have (as a decimal, not hex)
+		Hoist       bool   `json:"hoist"`       // Whether to display the role's users separately
+		Permissions int    `json:"permissions"` // The overall permissions number of the role (overwrites existing)
+		Mentionable bool   `json:"mentionable"` // Whether this role is mentionable
+	}{name, color, hoist, perm, mention}
+
+	body, err := s.RequestWithBucketID("PATCH", EndpointGuildRole(guildID, roleID), data, EndpointGuildRole(guildID, ""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+
+	return
+}
+
+// GuildRoleReorder reorders guild roles
+// guildID : The ID of a Guild.
+// roles : A list of ordered roles.
+func (s *Session) GuildRoleReorder(guildID string, roles []*Role) (st []*Role, err error) {
+
+	body, err := s.RequestWithBucketID("PATCH", EndpointGuildRoles(guildID), roles, EndpointGuildRoles(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+
+	return
+}
+
+// GuildRoleDelete deletes an existing role.
+// guildID : The ID of a Guild.
+// roleID : The ID of a Role.
+func (s *Session) GuildRoleDelete(guildID, roleID string) (err error) {
+
+	_, err = s.RequestWithBucketID("DELETE", EndpointGuildRole(guildID, roleID), nil, EndpointGuildRole(guildID, ""))
+
+	return
+}
+
+// GuildPruneCount returns the number of members that would be removed in a
+// prune operation. Requires the 'KICK_MEMBERS' permission.
+// guildID : The ID of a Guild.
+// days : The number of days to count prune for (1 or more).
+func (s *Session) GuildPruneCount(guildID string, days uint32) (count uint32, err error) {
+	count = 0
+
+	if days <= 0 {
+		err = ErrPruneDaysBounds
+		return
+	}
+
+	p := struct {
+		Pruned uint32 `json:"pruned"`
+	}{}
+
+	uri := EndpointGuildPrune(guildID) + fmt.Sprintf("?days=%d", days)
+	body, err := s.RequestWithBucketID("GET", uri, nil, EndpointGuildPrune(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &p)
+	if err != nil {
+		return
+	}
+
+	count = p.Pruned
+
+	return
+}
+
+// GuildPrune begins a prune operation. Requires the 'KICK_MEMBERS' permission.
+// Returns an object with one 'pruned' key indicating the number of members that were removed in the prune operation.
+// guildID : The ID of a Guild.
+// days : The number of days to count prune for (1 or more).
+func (s *Session) GuildPrune(guildID string, days uint32) (count uint32, err error) {
+
+	count = 0
+
+	if days <= 0 {
+		err = ErrPruneDaysBounds
+		return
+	}
+
+	// The field must be exported and tagged, or encoding/json would
+	// silently drop it from the request body.
+	data := struct {
+		Days uint32 `json:"days"`
+	}{days}
+
+	p := struct {
+		Pruned uint32 `json:"pruned"`
+	}{}
+
+	body, err := s.RequestWithBucketID("POST", EndpointGuildPrune(guildID), data, EndpointGuildPrune(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &p)
+	if err != nil {
+		return
+	}
+
+	count = p.Pruned
+
+	return
+}
+
+// GuildIntegrations returns an array of Integrations for a guild.
+// guildID : The ID of a Guild.
+func (s *Session) GuildIntegrations(guildID string) (st []*GuildIntegration, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointGuildIntegrations(guildID), nil, EndpointGuildIntegrations(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+
+	return
+}
+
+// GuildIntegrationCreate creates a Guild Integration.
+// guildID : The ID of a Guild.
+// integrationType : The Integration type.
+// integrationID : The ID of an integration.
+func (s *Session) GuildIntegrationCreate(guildID, integrationType, integrationID string) (err error) {
+
+	data := struct {
+		Type string `json:"type"`
+		ID   string `json:"id"`
+	}{integrationType, integrationID}
+
+	_, err = s.RequestWithBucketID("POST", EndpointGuildIntegrations(guildID), data, EndpointGuildIntegrations(guildID))
+	return
+}
+
+// GuildIntegrationEdit edits a Guild Integration.
+// guildID : The ID of a Guild.
+// integrationID : The ID of an integration.
+// expireBehavior : The behavior when an integration subscription lapses (see the integration object documentation).
+// expireGracePeriod : Period (in seconds) where the integration will ignore lapsed subscriptions.
+// enableEmoticons : Whether emoticons should be synced for this integration (twitch only currently).
+func (s *Session) GuildIntegrationEdit(guildID, integrationID string, expireBehavior, expireGracePeriod int, enableEmoticons bool) (err error) {
+
+	data := struct {
+		ExpireBehavior    int  `json:"expire_behavior"`
+		ExpireGracePeriod int  `json:"expire_grace_period"`
+		EnableEmoticons   bool `json:"enable_emoticons"`
+	}{expireBehavior, expireGracePeriod, enableEmoticons}
+
+	_, err = s.RequestWithBucketID("PATCH", EndpointGuildIntegration(guildID, integrationID), data, EndpointGuildIntegration(guildID, ""))
+	return
+}
+
+// GuildIntegrationDelete removes the given integration from the Guild.
+// guildID : The ID of a Guild.
+// integrationID : The ID of an integration.
+func (s *Session) GuildIntegrationDelete(guildID, integrationID string) (err error) {
+
+	_, err = s.RequestWithBucketID("DELETE", EndpointGuildIntegration(guildID, integrationID), nil, EndpointGuildIntegration(guildID, ""))
+	return
+}
+
+// GuildIntegrationSync syncs an integration.
+// guildID : The ID of a Guild.
+// integrationID : The ID of an integration.
+func (s *Session) GuildIntegrationSync(guildID, integrationID string) (err error) {
+
+	_, err = s.RequestWithBucketID("POST", EndpointGuildIntegrationSync(guildID, integrationID), nil, EndpointGuildIntegration(guildID, ""))
+	return
+}
+
+// GuildIcon returns an image.Image of a guild icon.
+// guildID : The ID of a Guild.
+func (s *Session) GuildIcon(guildID string) (img image.Image, err error) {
+	g, err := s.Guild(guildID)
+	if err != nil {
+		return
+	}
+
+	if g.Icon == "" {
+		err = ErrGuildNoIcon
+		return
+	}
+
+	body, err := s.RequestWithBucketID("GET", EndpointGuildIcon(guildID, g.Icon), nil, EndpointGuildIcon(guildID, ""))
+	if err != nil {
+		return
+	}
+
+	img, _, err = image.Decode(bytes.NewReader(body))
+	return
+}
+
+// GuildSplash returns an image.Image of a guild splash image.
+// guildID : The ID of a Guild.
+func (s *Session) GuildSplash(guildID string) (img image.Image, err error) {
+	g, err := s.Guild(guildID)
+	if err != nil {
+		return
+	}
+
+	if g.Splash == "" {
+		err = ErrGuildNoSplash
+		return
+	}
+
+	body, err := s.RequestWithBucketID("GET", EndpointGuildSplash(guildID, g.Splash), nil, EndpointGuildSplash(guildID, ""))
+	if err != nil {
+		return
+	}
+
+	img, _, err = image.Decode(bytes.NewReader(body))
+	return
+}
+
+// GuildEmbed returns the embed for a Guild.
+// guildID : The ID of a Guild.
+func (s *Session) GuildEmbed(guildID string) (st *GuildEmbed, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointGuildEmbed(guildID), nil, EndpointGuildEmbed(guildID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// GuildEmbedEdit edits the embed of a Guild.
+// guildID : The ID of a Guild.
+// enabled : Whether the embed should be enabled.
+// channelID : The ID of the embed Channel.
+func (s *Session) GuildEmbedEdit(guildID string, enabled bool, channelID string) (err error) {
+
+	data := GuildEmbed{enabled, channelID}
+
+	_, err = s.RequestWithBucketID("PATCH", EndpointGuildEmbed(guildID), data, EndpointGuildEmbed(guildID))
+	return
+}
+
+// ------------------------------------------------------------------------------------------------
+// Functions specific to Discord Channels
+// ------------------------------------------------------------------------------------------------
+
+// Channel returns a Channel structure of a specific Channel.
+// channelID : The ID of the Channel you want returned.
+func (s *Session) Channel(channelID string) (st *Channel, err error) {
+	body, err := s.RequestWithBucketID("GET", EndpointChannel(channelID), nil, EndpointChannel(channelID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// ChannelEdit edits the given channel
+// channelID : The ID of a Channel
+// name : The new name to assign the channel.
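+//
+// For example (the channel ID is a placeholder):
+//
+//	ch, err := session.ChannelEdit("155361364909621248", "general-chat")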
+func (s *Session) ChannelEdit(channelID, name string) (st *Channel, err error) {
+
+	data := struct {
+		Name string `json:"name"`
+	}{name}
+
+	body, err := s.RequestWithBucketID("PATCH", EndpointChannel(channelID), data, EndpointChannel(channelID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// ChannelDelete deletes the given channel
+// channelID : The ID of a Channel
+func (s *Session) ChannelDelete(channelID string) (st *Channel, err error) {
+
+	body, err := s.RequestWithBucketID("DELETE", EndpointChannel(channelID), nil, EndpointChannel(channelID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// ChannelTyping broadcasts to all members that the authenticated user is typing in
+// the given channel.
+// channelID : The ID of a Channel
+func (s *Session) ChannelTyping(channelID string) (err error) {
+
+	_, err = s.RequestWithBucketID("POST", EndpointChannelTyping(channelID), nil, EndpointChannelTyping(channelID))
+	return
+}
+
+// ChannelMessages returns an array of Message structures for messages within
+// a given channel.
+// channelID : The ID of a Channel.
+// limit : The number of messages that can be returned. (max 100)
+// beforeID : If provided all messages returned will be before given ID.
+// afterID : If provided all messages returned will be after given ID.
+// aroundID : If provided all messages returned will be around given ID.
+func (s *Session) ChannelMessages(channelID string, limit int, beforeID, afterID, aroundID string) (st []*Message, err error) {
+
+	uri := EndpointChannelMessages(channelID)
+
+	v := url.Values{}
+	if limit > 0 {
+		v.Set("limit", strconv.Itoa(limit))
+	}
+	if afterID != "" {
+		v.Set("after", afterID)
+	}
+	if beforeID != "" {
+		v.Set("before", beforeID)
+	}
+	if aroundID != "" {
+		v.Set("around", aroundID)
+	}
+	if len(v) > 0 {
+		uri = fmt.Sprintf("%s?%s", uri, v.Encode())
+	}
+
+	body, err := s.RequestWithBucketID("GET", uri, nil, EndpointChannelMessages(channelID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// ChannelMessage gets a single message by ID from a given channel.
+// channelID : The ID of a Channel
+// messageID : The ID of a Message
+func (s *Session) ChannelMessage(channelID, messageID string) (st *Message, err error) {
+
+	response, err := s.RequestWithBucketID("GET", EndpointChannelMessage(channelID, messageID), nil, EndpointChannelMessage(channelID, ""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(response, &st)
+	return
+}
+
+// ChannelMessageAck acknowledges and marks the given message as read
+// channelID : The ID of a Channel
+// messageID : The ID of a Message
+// lastToken : token returned by last ack
+func (s *Session) ChannelMessageAck(channelID, messageID, lastToken string) (st *Ack, err error) {
+
+	body, err := s.RequestWithBucketID("POST", EndpointChannelMessageAck(channelID, messageID), &Ack{Token: lastToken}, EndpointChannelMessageAck(channelID, ""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// ChannelMessageSend sends a message to the given channel.
+// channelID : The ID of a Channel.
+// content : The message to send.
+func (s *Session) ChannelMessageSend(channelID string, content string) (*Message, error) {
+	return s.ChannelMessageSendComplex(channelID, &MessageSend{
+		Content: content,
+	})
+}
+
+// ChannelMessageSendComplex sends a message to the given channel.
+// channelID : The ID of a Channel.
+// data : The message struct to send.
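+//
+// An illustrative call that sends text together with an embed (MessageSend
+// and MessageEmbed are the structs used by this package; the values are
+// placeholders):
+//
+//	msg, err := session.ChannelMessageSendComplex(channelID, &discordgo.MessageSend{
+//		Content: "release notes",
+//		Embed:   &discordgo.MessageEmbed{Title: "v1.0.0"},
+//	})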
+func (s *Session) ChannelMessageSendComplex(channelID string, data *MessageSend) (st *Message, err error) { + if data.Embed != nil && data.Embed.Type == "" { + data.Embed.Type = "rich" + } + + endpoint := EndpointChannelMessages(channelID) + + var response []byte + if data.File != nil { + body := &bytes.Buffer{} + bodywriter := multipart.NewWriter(body) + + // What's a better way of doing this? Reflect? Generator? I'm open to suggestions + + if data.Content != "" { + if err = bodywriter.WriteField("content", data.Content); err != nil { + return + } + } + + if data.Embed != nil { + var embed []byte + embed, err = json.Marshal(data.Embed) + if err != nil { + return + } + err = bodywriter.WriteField("embed", string(embed)) + if err != nil { + return + } + } + + if data.Tts { + if err = bodywriter.WriteField("tts", "true"); err != nil { + return + } + } + + var writer io.Writer + writer, err = bodywriter.CreateFormFile("file", data.File.Name) + if err != nil { + return + } + + _, err = io.Copy(writer, data.File.Reader) + if err != nil { + return + } + + err = bodywriter.Close() + if err != nil { + return + } + + response, err = s.request("POST", endpoint, bodywriter.FormDataContentType(), body.Bytes(), endpoint, 0) + } else { + response, err = s.RequestWithBucketID("POST", endpoint, data, endpoint) + } + if err != nil { + return + } + + err = unmarshal(response, &st) + return +} + +// ChannelMessageSendTTS sends a message to the given channel with Text to Speech. +// channelID : The ID of a Channel. +// content : The message to send. +func (s *Session) ChannelMessageSendTTS(channelID string, content string) (*Message, error) { + return s.ChannelMessageSendComplex(channelID, &MessageSend{ + Content: content, + Tts: true, + }) +} + +// ChannelMessageSendEmbed sends a message to the given channel with embedded data. +// channelID : The ID of a Channel. +// embed : The embed data to send. +func (s *Session) ChannelMessageSendEmbed(channelID string, embed *MessageEmbed) (*Message, error) { + return s.ChannelMessageSendComplex(channelID, &MessageSend{ + Embed: embed, + }) +} + +// ChannelMessageEdit edits an existing message, replacing it entirely with +// the given content. +// channelID : The ID of a Channel +// messageID : The ID of a Message +// content : The contents of the message +func (s *Session) ChannelMessageEdit(channelID, messageID, content string) (*Message, error) { + return s.ChannelMessageEditComplex(NewMessageEdit(channelID, messageID).SetContent(content)) +} + +// ChannelMessageEditComplex edits an existing message, replacing it entirely with +// the given MessageEdit struct +func (s *Session) ChannelMessageEditComplex(m *MessageEdit) (st *Message, err error) { + if m.Embed != nil && m.Embed.Type == "" { + m.Embed.Type = "rich" + } + + response, err := s.RequestWithBucketID("PATCH", EndpointChannelMessage(m.Channel, m.ID), m, EndpointChannelMessage(m.Channel, "")) + if err != nil { + return + } + + err = unmarshal(response, &st) + return +} + +// ChannelMessageEditEmbed edits an existing message with embedded data. +// channelID : The ID of a Channel +// messageID : The ID of a Message +// embed : The embed data to send +func (s *Session) ChannelMessageEditEmbed(channelID, messageID string, embed *MessageEmbed) (*Message, error) { + return s.ChannelMessageEditComplex(NewMessageEdit(channelID, messageID).SetEmbed(embed)) +} + +// ChannelMessageDelete deletes a message from the Channel. 
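+// channelID : The ID of a Channel
+// messageID : The ID of a Message
+//
+// An illustrative sketch that clears the last 50 messages of a channel by
+// combining this with ChannelMessages and ChannelMessagesBulkDelete below:
+//
+//	msgs, _ := session.ChannelMessages(channelID, 50, "", "", "")
+//	ids := make([]string, 0, len(msgs))
+//	for _, m := range msgs {
+//		ids = append(ids, m.ID)
+//	}
+//	_ = session.ChannelMessagesBulkDelete(channelID, ids)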
+func (s *Session) ChannelMessageDelete(channelID, messageID string) (err error) {
+
+	_, err = s.RequestWithBucketID("DELETE", EndpointChannelMessage(channelID, messageID), nil, EndpointChannelMessage(channelID, ""))
+	return
+}
+
+// ChannelMessagesBulkDelete bulk deletes the messages from the channel for the provided messageIDs.
+// If only one messageID is in the slice, the ChannelMessageDelete function is called instead.
+// If the slice is empty, do nothing.
+// channelID : The ID of the channel for the messages to delete.
+// messages : The IDs of the messages to be deleted. A slice of string IDs. A maximum of 100 messages.
+func (s *Session) ChannelMessagesBulkDelete(channelID string, messages []string) (err error) {
+
+	if len(messages) == 0 {
+		return
+	}
+
+	if len(messages) == 1 {
+		err = s.ChannelMessageDelete(channelID, messages[0])
+		return
+	}
+
+	if len(messages) > 100 {
+		messages = messages[:100]
+	}
+
+	data := struct {
+		Messages []string `json:"messages"`
+	}{messages}
+
+	_, err = s.RequestWithBucketID("POST", EndpointChannelMessagesBulkDelete(channelID), data, EndpointChannelMessagesBulkDelete(channelID))
+	return
+}
+
+// ChannelMessagePin pins a message within a given channel.
+// channelID : The ID of a channel.
+// messageID : The ID of a message.
+func (s *Session) ChannelMessagePin(channelID, messageID string) (err error) {
+
+	_, err = s.RequestWithBucketID("PUT", EndpointChannelMessagePin(channelID, messageID), nil, EndpointChannelMessagePin(channelID, ""))
+	return
+}
+
+// ChannelMessageUnpin unpins a message within a given channel.
+// channelID : The ID of a channel.
+// messageID : The ID of a message.
+func (s *Session) ChannelMessageUnpin(channelID, messageID string) (err error) {
+
+	_, err = s.RequestWithBucketID("DELETE", EndpointChannelMessagePin(channelID, messageID), nil, EndpointChannelMessagePin(channelID, ""))
+	return
+}
+
+// ChannelMessagesPinned returns an array of Message structures for pinned messages
+// within a given channel
+// channelID : The ID of a Channel.
+func (s *Session) ChannelMessagesPinned(channelID string) (st []*Message, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointChannelMessagesPins(channelID), nil, EndpointChannelMessagesPins(channelID))
+
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// ChannelFileSend sends a file to the given channel.
+// channelID : The ID of a Channel.
+// name : The name of the file.
+// r : A reader for the file contents.
+func (s *Session) ChannelFileSend(channelID, name string, r io.Reader) (*Message, error) {
+	return s.ChannelMessageSendComplex(channelID, &MessageSend{File: &File{Name: name, Reader: r}})
+}
+
+// ChannelFileSendWithMessage sends a file to the given channel with a message.
+// DEPRECATED. Use ChannelMessageSendComplex instead.
+// channelID : The ID of a Channel.
+// content : Optional message content.
+// name : The name of the file.
+// r : A reader for the file contents.
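+//
+// An illustrative sketch using an *os.File as the reader (the filename is a
+// placeholder):
+//
+//	f, err := os.Open("chart.png")
+//	if err == nil {
+//		defer f.Close()
+//		_, err = session.ChannelFileSendWithMessage(channelID, "this week's numbers", "chart.png", f)
+//	}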
+func (s *Session) ChannelFileSendWithMessage(channelID, content string, name string, r io.Reader) (*Message, error) {
+	return s.ChannelMessageSendComplex(channelID, &MessageSend{File: &File{Name: name, Reader: r}, Content: content})
+}
+
+// ChannelInvites returns an array of Invite structures for the given channel
+// channelID : The ID of a Channel
+func (s *Session) ChannelInvites(channelID string) (st []*Invite, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointChannelInvites(channelID), nil, EndpointChannelInvites(channelID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// ChannelInviteCreate creates a new invite for the given channel.
+// channelID : The ID of a Channel
+// i : An Invite struct with the values MaxAge, MaxUses, Temporary,
+// and XkcdPass defined.
+func (s *Session) ChannelInviteCreate(channelID string, i Invite) (st *Invite, err error) {
+
+	data := struct {
+		MaxAge    int    `json:"max_age"`
+		MaxUses   int    `json:"max_uses"`
+		Temporary bool   `json:"temporary"`
+		XKCDPass  string `json:"xkcdpass"`
+	}{i.MaxAge, i.MaxUses, i.Temporary, i.XkcdPass}
+
+	body, err := s.RequestWithBucketID("POST", EndpointChannelInvites(channelID), data, EndpointChannelInvites(channelID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// ChannelPermissionSet creates a Permission Override for the given channel.
+// NOTE: This func name may change. Using Set instead of Create because
+// you can both create a new override and update an existing override with this function.
+func (s *Session) ChannelPermissionSet(channelID, targetID, targetType string, allow, deny int) (err error) {
+
+	data := struct {
+		ID    string `json:"id"`
+		Type  string `json:"type"`
+		Allow int    `json:"allow"`
+		Deny  int    `json:"deny"`
+	}{targetID, targetType, allow, deny}
+
+	_, err = s.RequestWithBucketID("PUT", EndpointChannelPermission(channelID, targetID), data, EndpointChannelPermission(channelID, ""))
+	return
+}
+
+// ChannelPermissionDelete deletes a specific permission override for the given channel.
+// NOTE: Name of this func may change.
+func (s *Session) ChannelPermissionDelete(channelID, targetID string) (err error) {
+
+	_, err = s.RequestWithBucketID("DELETE", EndpointChannelPermission(channelID, targetID), nil, EndpointChannelPermission(channelID, ""))
+	return
+}
+
+// ------------------------------------------------------------------------------------------------
+// Functions specific to Discord Invites
+// ------------------------------------------------------------------------------------------------
+
+// Invite returns an Invite structure of the given invite
+// inviteID : The invite code (or maybe xkcdpass?)
+func (s *Session) Invite(inviteID string) (st *Invite, err error) {
+
+	body, err := s.RequestWithBucketID("GET", EndpointInvite(inviteID), nil, EndpointInvite(""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// InviteDelete deletes an existing invite
+// inviteID : The code (or maybe xkcdpass?) of an invite
+func (s *Session) InviteDelete(inviteID string) (st *Invite, err error) {
+
+	body, err := s.RequestWithBucketID("DELETE", EndpointInvite(inviteID), nil, EndpointInvite(""))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &st)
+	return
+}
+
+// InviteAccept accepts an Invite to a Guild or Channel
+// inviteID : The invite code (or maybe xkcdpass?)
+func (s *Session) InviteAccept(inviteID string) (st *Invite, err error) { + + body, err := s.RequestWithBucketID("POST", EndpointInvite(inviteID), nil, EndpointInvite("")) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + +// ------------------------------------------------------------------------------------------------ +// Functions specific to Discord Voice +// ------------------------------------------------------------------------------------------------ + +// VoiceRegions returns the voice server regions +func (s *Session) VoiceRegions() (st []*VoiceRegion, err error) { + + body, err := s.RequestWithBucketID("GET", EndpointVoiceRegions, nil, EndpointVoiceRegions) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + +// VoiceICE returns the voice server ICE information +func (s *Session) VoiceICE() (st *VoiceICE, err error) { + + body, err := s.RequestWithBucketID("GET", EndpointVoiceIce, nil, EndpointVoiceIce) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + +// ------------------------------------------------------------------------------------------------ +// Functions specific to Discord Websockets +// ------------------------------------------------------------------------------------------------ + +// Gateway returns the websocket Gateway address +func (s *Session) Gateway() (gateway string, err error) { + + response, err := s.RequestWithBucketID("GET", EndpointGateway, nil, EndpointGateway) + if err != nil { + return + } + + temp := struct { + URL string `json:"url"` + }{} + + err = unmarshal(response, &temp) + if err != nil { + return + } + + gateway = temp.URL + + // Ensure the gateway always has a trailing slash. + // MacOS will fail to connect if we add query params without a trailing slash on the base domain. + if !strings.HasSuffix(gateway, "/") { + gateway += "/" + } + + return +} + +// Functions specific to Webhooks + +// WebhookCreate returns a new Webhook. +// channelID: The ID of a Channel. +// name : The name of the webhook. +// avatar : The avatar of the webhook. +func (s *Session) WebhookCreate(channelID, name, avatar string) (st *Webhook, err error) { + + data := struct { + Name string `json:"name"` + Avatar string `json:"avatar,omitempty"` + }{name, avatar} + + body, err := s.RequestWithBucketID("POST", EndpointChannelWebhooks(channelID), data, EndpointChannelWebhooks(channelID)) + if err != nil { + return + } + + err = unmarshal(body, &st) + + return +} + +// ChannelWebhooks returns all webhooks for a given channel. +// channelID: The ID of a channel. +func (s *Session) ChannelWebhooks(channelID string) (st []*Webhook, err error) { + + body, err := s.RequestWithBucketID("GET", EndpointChannelWebhooks(channelID), nil, EndpointChannelWebhooks(channelID)) + if err != nil { + return + } + + err = unmarshal(body, &st) + + return +} + +// GuildWebhooks returns all webhooks for a given guild. +// guildID: The ID of a Guild. +func (s *Session) GuildWebhooks(guildID string) (st []*Webhook, err error) { + + body, err := s.RequestWithBucketID("GET", EndpointGuildWebhooks(guildID), nil, EndpointGuildWebhooks(guildID)) + if err != nil { + return + } + + err = unmarshal(body, &st) + + return +} + +// Webhook returns a webhook for a given ID +// webhookID: The ID of a webhook. 
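+//
+// An illustrative roundtrip through the webhook helpers in this section
+// (WebhookParams is defined in structs.go; the Content field and the webhook
+// name are assumptions for the sketch):
+//
+//	wh, err := session.WebhookCreate(channelID, "auditor", "")
+//	if err == nil {
+//		err = session.WebhookExecute(wh.ID, wh.Token, false, &discordgo.WebhookParams{Content: "hi"})
+//	}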
+func (s *Session) Webhook(webhookID string) (st *Webhook, err error) { + + body, err := s.RequestWithBucketID("GET", EndpointWebhook(webhookID), nil, EndpointWebhooks) + if err != nil { + return + } + + err = unmarshal(body, &st) + + return +} + +// WebhookWithToken returns a webhook for a given ID +// webhookID: The ID of a webhook. +// token : The auth token for the webhook. +func (s *Session) WebhookWithToken(webhookID, token string) (st *Webhook, err error) { + + body, err := s.RequestWithBucketID("GET", EndpointWebhookToken(webhookID, token), nil, EndpointWebhookToken("", "")) + if err != nil { + return + } + + err = unmarshal(body, &st) + + return +} + +// WebhookEdit updates an existing Webhook. +// webhookID: The ID of a webhook. +// name : The name of the webhook. +// avatar : The avatar of the webhook. +func (s *Session) WebhookEdit(webhookID, name, avatar string) (st *Role, err error) { + + data := struct { + Name string `json:"name,omitempty"` + Avatar string `json:"avatar,omitempty"` + }{name, avatar} + + body, err := s.RequestWithBucketID("PATCH", EndpointWebhook(webhookID), data, EndpointWebhooks) + if err != nil { + return + } + + err = unmarshal(body, &st) + + return +} + +// WebhookEditWithToken updates an existing Webhook with an auth token. +// webhookID: The ID of a webhook. +// token : The auth token for the webhook. +// name : The name of the webhook. +// avatar : The avatar of the webhook. +func (s *Session) WebhookEditWithToken(webhookID, token, name, avatar string) (st *Role, err error) { + + data := struct { + Name string `json:"name,omitempty"` + Avatar string `json:"avatar,omitempty"` + }{name, avatar} + + body, err := s.RequestWithBucketID("PATCH", EndpointWebhookToken(webhookID, token), data, EndpointWebhookToken("", "")) + if err != nil { + return + } + + err = unmarshal(body, &st) + + return +} + +// WebhookDelete deletes a webhook for a given ID +// webhookID: The ID of a webhook. +func (s *Session) WebhookDelete(webhookID string) (st *Webhook, err error) { + + body, err := s.RequestWithBucketID("DELETE", EndpointWebhook(webhookID), nil, EndpointWebhooks) + if err != nil { + return + } + + err = unmarshal(body, &st) + + return +} + +// WebhookDeleteWithToken deletes a webhook for a given ID with an auth token. +// webhookID: The ID of a webhook. +// token : The auth token for the webhook. +func (s *Session) WebhookDeleteWithToken(webhookID, token string) (st *Webhook, err error) { + + body, err := s.RequestWithBucketID("DELETE", EndpointWebhookToken(webhookID, token), nil, EndpointWebhookToken("", "")) + if err != nil { + return + } + + err = unmarshal(body, &st) + + return +} + +// WebhookExecute executes a webhook. +// webhookID: The ID of a webhook. +// token : The auth token for the webhook +func (s *Session) WebhookExecute(webhookID, token string, wait bool, data *WebhookParams) (err error) { + uri := EndpointWebhookToken(webhookID, token) + + if wait { + uri += "?wait=true" + } + + _, err = s.RequestWithBucketID("POST", uri, data, EndpointWebhookToken("", "")) + + return +} + +// MessageReactionAdd creates an emoji reaction to a message. +// channelID : The channel ID. +// messageID : The message ID. +// emojiID : Either the unicode emoji for the reaction, or a guild emoji identifier. 
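+//
+// For example, a unicode reaction versus a guild emoji reaction (a guild
+// emoji identifier takes the "name:id" form; the IDs are placeholders):
+//
+//	err := session.MessageReactionAdd(channelID, messageID, "👍")
+//	err = session.MessageReactionAdd(channelID, messageID, "myemoji:155361364909621250")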
+func (s *Session) MessageReactionAdd(channelID, messageID, emojiID string) error { + + _, err := s.RequestWithBucketID("PUT", EndpointMessageReaction(channelID, messageID, emojiID, "@me"), nil, EndpointMessageReaction(channelID, "", "", "")) + + return err +} + +// MessageReactionRemove deletes an emoji reaction to a message. +// channelID : The channel ID. +// messageID : The message ID. +// emojiID : Either the unicode emoji for the reaction, or a guild emoji identifier. +// userID : @me or ID of the user to delete the reaction for. +func (s *Session) MessageReactionRemove(channelID, messageID, emojiID, userID string) error { + + _, err := s.RequestWithBucketID("DELETE", EndpointMessageReaction(channelID, messageID, emojiID, userID), nil, EndpointMessageReaction(channelID, "", "", "")) + + return err +} + +// MessageReactions gets all the users reactions for a specific emoji. +// channelID : The channel ID. +// messageID : The message ID. +// emojiID : Either the unicode emoji for the reaction, or a guild emoji identifier. +// limit : max number of users to return (max 100) +func (s *Session) MessageReactions(channelID, messageID, emojiID string, limit int) (st []*User, err error) { + uri := EndpointMessageReactions(channelID, messageID, emojiID) + + v := url.Values{} + + if limit > 0 { + v.Set("limit", strconv.Itoa(limit)) + } + + if len(v) > 0 { + uri = fmt.Sprintf("%s?%s", uri, v.Encode()) + } + + body, err := s.RequestWithBucketID("GET", uri, nil, EndpointMessageReaction(channelID, "", "", "")) + if err != nil { + return + } + + err = unmarshal(body, &st) + return +} + +// ------------------------------------------------------------------------------------------------ +// Functions specific to user notes +// ------------------------------------------------------------------------------------------------ + +// UserNoteSet sets the note for a specific user. +func (s *Session) UserNoteSet(userID string, message string) (err error) { + data := struct { + Note string `json:"note"` + }{message} + + _, err = s.RequestWithBucketID("PUT", EndpointUserNotes(userID), data, EndpointUserNotes("")) + return +} + +// ------------------------------------------------------------------------------------------------ +// Functions specific to Discord Relationships (Friends list) +// ------------------------------------------------------------------------------------------------ + +// RelationshipsGet returns an array of all the relationships of the user. +func (s *Session) RelationshipsGet() (r []*Relationship, err error) { + body, err := s.RequestWithBucketID("GET", EndpointRelationships(), nil, EndpointRelationships()) + if err != nil { + return + } + + err = unmarshal(body, &r) + return +} + +// relationshipCreate creates a new relationship. (I.e. send or accept a friend request, block a user.) +// relationshipType : 1 = friend, 2 = blocked, 3 = incoming friend req, 4 = sent friend req +func (s *Session) relationshipCreate(userID string, relationshipType int) (err error) { + data := struct { + Type int `json:"type"` + }{relationshipType} + + _, err = s.RequestWithBucketID("PUT", EndpointRelationship(userID), data, EndpointRelationships()) + return +} + +// RelationshipFriendRequestSend sends a friend request to a user. +// userID: ID of the user. +func (s *Session) RelationshipFriendRequestSend(userID string) (err error) { + err = s.relationshipCreate(userID, 4) + return +} + +// RelationshipFriendRequestAccept accepts a friend request from a user. +// userID: ID of the user. 
+func (s *Session) RelationshipFriendRequestAccept(userID string) (err error) {
+	err = s.relationshipCreate(userID, 1)
+	return
+}
+
+// RelationshipUserBlock blocks a user.
+// userID: ID of the user.
+func (s *Session) RelationshipUserBlock(userID string) (err error) {
+	err = s.relationshipCreate(userID, 2)
+	return
+}
+
+// RelationshipDelete removes the relationship with a user.
+// userID: ID of the user.
+func (s *Session) RelationshipDelete(userID string) (err error) {
+	_, err = s.RequestWithBucketID("DELETE", EndpointRelationship(userID), nil, EndpointRelationships())
+	return
+}
+
+// RelationshipsMutualGet returns an array of all the users both @me and the given user are friends with.
+// userID: ID of the user.
+func (s *Session) RelationshipsMutualGet(userID string) (mf []*User, err error) {
+	body, err := s.RequestWithBucketID("GET", EndpointRelationshipsMutual(userID), nil, EndpointRelationshipsMutual(userID))
+	if err != nil {
+		return
+	}
+
+	err = unmarshal(body, &mf)
+	return
+}
diff --git a/vendor/github.com/bwmarrin/discordgo/state.go b/vendor/github.com/bwmarrin/discordgo/state.go
new file mode 100644
index 0000000..7400ef6
--- /dev/null
+++ b/vendor/github.com/bwmarrin/discordgo/state.go
@@ -0,0 +1,949 @@
+// Discordgo - Discord bindings for Go
+// Available at https://github.com/bwmarrin/discordgo
+
+// Copyright 2015-2016 Bruce Marriner. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains code related to state tracking. If enabled, state
+// tracking will capture the initial READY packet and many other websocket
+// events and maintain an in-memory state of guilds, channels, users, and
+// so forth. This information can be accessed through the Session.State struct.
+
+package discordgo
+
+import (
+	"errors"
+	"sort"
+	"sync"
+)
+
+// ErrNilState is returned when the state is nil.
+var ErrNilState = errors.New("state not instantiated, please use discordgo.New() or assign Session.State")
+
+// ErrStateNotFound is returned when the state cache
+// requested is not found.
+var ErrStateNotFound = errors.New("state cache not found")
+
+// A State contains the current known state.
+// As Discord sends this in a READY blob, it seems reasonable to simply
+// use that struct as the data store.
+type State struct {
+	sync.RWMutex
+	Ready
+
+	MaxMessageCount int
+	TrackChannels   bool
+	TrackEmojis     bool
+	TrackMembers    bool
+	TrackRoles      bool
+	TrackVoice      bool
+	TrackPresences  bool
+
+	guildMap   map[string]*Guild
+	channelMap map[string]*Channel
+}
+
+// NewState creates an empty state.
+func NewState() *State {
+	return &State{
+		Ready: Ready{
+			PrivateChannels: []*Channel{},
+			Guilds:          []*Guild{},
+		},
+		TrackChannels:  true,
+		TrackEmojis:    true,
+		TrackMembers:   true,
+		TrackRoles:     true,
+		TrackVoice:     true,
+		TrackPresences: true,
+		guildMap:       make(map[string]*Guild),
+		channelMap:     make(map[string]*Channel),
+	}
+}
+
+// GuildAdd adds a guild to the current world state, or
+// updates it if it already exists.
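+//
+// State is normally maintained automatically from gateway events; a sketch
+// of seeding it by hand, assuming guild was fetched elsewhere:
+//
+//	state := NewState()
+//	if err := state.GuildAdd(guild); err != nil {
+//		fmt.Println("error adding guild to state,", err)
+//	}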
+func (s *State) GuildAdd(guild *Guild) error { + if s == nil { + return ErrNilState + } + + s.Lock() + defer s.Unlock() + + // Update the channels to point to the right guild, adding them to the channelMap as we go + for _, c := range guild.Channels { + s.channelMap[c.ID] = c + } + + if g, ok := s.guildMap[guild.ID]; ok { + // We are about to replace `g` in the state with `guild`, but first we need to + // make sure we preserve any fields that the `guild` doesn't contain from `g`. + if guild.Roles == nil { + guild.Roles = g.Roles + } + if guild.Emojis == nil { + guild.Emojis = g.Emojis + } + if guild.Members == nil { + guild.Members = g.Members + } + if guild.Presences == nil { + guild.Presences = g.Presences + } + if guild.Channels == nil { + guild.Channels = g.Channels + } + if guild.VoiceStates == nil { + guild.VoiceStates = g.VoiceStates + } + *g = *guild + return nil + } + + s.Guilds = append(s.Guilds, guild) + s.guildMap[guild.ID] = guild + + return nil +} + +// GuildRemove removes a guild from current world state. +func (s *State) GuildRemove(guild *Guild) error { + if s == nil { + return ErrNilState + } + + _, err := s.Guild(guild.ID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + delete(s.guildMap, guild.ID) + + for i, g := range s.Guilds { + if g.ID == guild.ID { + s.Guilds = append(s.Guilds[:i], s.Guilds[i+1:]...) + return nil + } + } + + return nil +} + +// Guild gets a guild by ID. +// Useful for querying if @me is in a guild: +// _, err := discordgo.Session.State.Guild(guildID) +// isInGuild := err == nil +func (s *State) Guild(guildID string) (*Guild, error) { + if s == nil { + return nil, ErrNilState + } + + s.RLock() + defer s.RUnlock() + + if g, ok := s.guildMap[guildID]; ok { + return g, nil + } + + return nil, ErrStateNotFound +} + +// PresenceAdd adds a presence to the current world state, or +// updates it if it already exists. +func (s *State) PresenceAdd(guildID string, presence *Presence) error { + if s == nil { + return ErrNilState + } + + guild, err := s.Guild(guildID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + for i, p := range guild.Presences { + if p.User.ID == presence.User.ID { + //guild.Presences[i] = presence + + //Update status + guild.Presences[i].Game = presence.Game + guild.Presences[i].Roles = presence.Roles + if presence.Status != "" { + guild.Presences[i].Status = presence.Status + } + if presence.Nick != "" { + guild.Presences[i].Nick = presence.Nick + } + + //Update the optionally sent user information + //ID Is a mandatory field so you should not need to check if it is empty + guild.Presences[i].User.ID = presence.User.ID + + if presence.User.Avatar != "" { + guild.Presences[i].User.Avatar = presence.User.Avatar + } + if presence.User.Discriminator != "" { + guild.Presences[i].User.Discriminator = presence.User.Discriminator + } + if presence.User.Email != "" { + guild.Presences[i].User.Email = presence.User.Email + } + if presence.User.Token != "" { + guild.Presences[i].User.Token = presence.User.Token + } + if presence.User.Username != "" { + guild.Presences[i].User.Username = presence.User.Username + } + + return nil + } + } + + guild.Presences = append(guild.Presences, presence) + return nil +} + +// PresenceRemove removes a presence from the current world state. 
+func (s *State) PresenceRemove(guildID string, presence *Presence) error { + if s == nil { + return ErrNilState + } + + guild, err := s.Guild(guildID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + for i, p := range guild.Presences { + if p.User.ID == presence.User.ID { + guild.Presences = append(guild.Presences[:i], guild.Presences[i+1:]...) + return nil + } + } + + return ErrStateNotFound +} + +// Presence gets a presence by ID from a guild. +func (s *State) Presence(guildID, userID string) (*Presence, error) { + if s == nil { + return nil, ErrNilState + } + + guild, err := s.Guild(guildID) + if err != nil { + return nil, err + } + + for _, p := range guild.Presences { + if p.User.ID == userID { + return p, nil + } + } + + return nil, ErrStateNotFound +} + +// TODO: Consider moving Guild state update methods onto *Guild. + +// MemberAdd adds a member to the current world state, or +// updates it if it already exists. +func (s *State) MemberAdd(member *Member) error { + if s == nil { + return ErrNilState + } + + guild, err := s.Guild(member.GuildID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + for i, m := range guild.Members { + if m.User.ID == member.User.ID { + guild.Members[i] = member + return nil + } + } + + guild.Members = append(guild.Members, member) + return nil +} + +// MemberRemove removes a member from current world state. +func (s *State) MemberRemove(member *Member) error { + if s == nil { + return ErrNilState + } + + guild, err := s.Guild(member.GuildID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + for i, m := range guild.Members { + if m.User.ID == member.User.ID { + guild.Members = append(guild.Members[:i], guild.Members[i+1:]...) + return nil + } + } + + return ErrStateNotFound +} + +// Member gets a member by ID from a guild. +func (s *State) Member(guildID, userID string) (*Member, error) { + if s == nil { + return nil, ErrNilState + } + + guild, err := s.Guild(guildID) + if err != nil { + return nil, err + } + + s.RLock() + defer s.RUnlock() + + for _, m := range guild.Members { + if m.User.ID == userID { + return m, nil + } + } + + return nil, ErrStateNotFound +} + +// RoleAdd adds a role to the current world state, or +// updates it if it already exists. +func (s *State) RoleAdd(guildID string, role *Role) error { + if s == nil { + return ErrNilState + } + + guild, err := s.Guild(guildID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + for i, r := range guild.Roles { + if r.ID == role.ID { + guild.Roles[i] = role + return nil + } + } + + guild.Roles = append(guild.Roles, role) + return nil +} + +// RoleRemove removes a role from current world state by ID. +func (s *State) RoleRemove(guildID, roleID string) error { + if s == nil { + return ErrNilState + } + + guild, err := s.Guild(guildID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + for i, r := range guild.Roles { + if r.ID == roleID { + guild.Roles = append(guild.Roles[:i], guild.Roles[i+1:]...) + return nil + } + } + + return ErrStateNotFound +} + +// Role gets a role by ID from a guild. 
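+//
+// A lookup sketch, assuming session is a connected *Session and the IDs
+// are placeholders:
+//
+//	role, err := session.State.Role(guildID, roleID)
+//	if err == ErrStateNotFound {
+//		// not cached; fall back to the REST API
+//	}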
+func (s *State) Role(guildID, roleID string) (*Role, error) { + if s == nil { + return nil, ErrNilState + } + + guild, err := s.Guild(guildID) + if err != nil { + return nil, err + } + + s.RLock() + defer s.RUnlock() + + for _, r := range guild.Roles { + if r.ID == roleID { + return r, nil + } + } + + return nil, ErrStateNotFound +} + +// ChannelAdd adds a channel to the current world state, or +// updates it if it already exists. +// Channels may exist either as PrivateChannels or inside +// a guild. +func (s *State) ChannelAdd(channel *Channel) error { + if s == nil { + return ErrNilState + } + + s.Lock() + defer s.Unlock() + + // If the channel exists, replace it + if c, ok := s.channelMap[channel.ID]; ok { + if channel.Messages == nil { + channel.Messages = c.Messages + } + if channel.PermissionOverwrites == nil { + channel.PermissionOverwrites = c.PermissionOverwrites + } + + *c = *channel + return nil + } + + if channel.IsPrivate { + s.PrivateChannels = append(s.PrivateChannels, channel) + } else { + guild, ok := s.guildMap[channel.GuildID] + if !ok { + return ErrStateNotFound + } + + guild.Channels = append(guild.Channels, channel) + } + + s.channelMap[channel.ID] = channel + + return nil +} + +// ChannelRemove removes a channel from current world state. +func (s *State) ChannelRemove(channel *Channel) error { + if s == nil { + return ErrNilState + } + + _, err := s.Channel(channel.ID) + if err != nil { + return err + } + + if channel.IsPrivate { + s.Lock() + defer s.Unlock() + + for i, c := range s.PrivateChannels { + if c.ID == channel.ID { + s.PrivateChannels = append(s.PrivateChannels[:i], s.PrivateChannels[i+1:]...) + break + } + } + } else { + guild, err := s.Guild(channel.GuildID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + for i, c := range guild.Channels { + if c.ID == channel.ID { + guild.Channels = append(guild.Channels[:i], guild.Channels[i+1:]...) + break + } + } + } + + delete(s.channelMap, channel.ID) + + return nil +} + +// GuildChannel gets a channel by ID from a guild. +// This method is Deprecated, use Channel(channelID) +func (s *State) GuildChannel(guildID, channelID string) (*Channel, error) { + return s.Channel(channelID) +} + +// PrivateChannel gets a private channel by ID. +// This method is Deprecated, use Channel(channelID) +func (s *State) PrivateChannel(channelID string) (*Channel, error) { + return s.Channel(channelID) +} + +// Channel gets a channel by ID, it will look in all guilds an private channels. +func (s *State) Channel(channelID string) (*Channel, error) { + if s == nil { + return nil, ErrNilState + } + + s.RLock() + defer s.RUnlock() + + if c, ok := s.channelMap[channelID]; ok { + return c, nil + } + + return nil, ErrStateNotFound +} + +// Emoji returns an emoji for a guild and emoji id. +func (s *State) Emoji(guildID, emojiID string) (*Emoji, error) { + if s == nil { + return nil, ErrNilState + } + + guild, err := s.Guild(guildID) + if err != nil { + return nil, err + } + + s.RLock() + defer s.RUnlock() + + for _, e := range guild.Emojis { + if e.ID == emojiID { + return e, nil + } + } + + return nil, ErrStateNotFound +} + +// EmojiAdd adds an emoji to the current world state. 
+func (s *State) EmojiAdd(guildID string, emoji *Emoji) error { + if s == nil { + return ErrNilState + } + + guild, err := s.Guild(guildID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + for i, e := range guild.Emojis { + if e.ID == emoji.ID { + guild.Emojis[i] = emoji + return nil + } + } + + guild.Emojis = append(guild.Emojis, emoji) + return nil +} + +// EmojisAdd adds multiple emojis to the world state. +func (s *State) EmojisAdd(guildID string, emojis []*Emoji) error { + for _, e := range emojis { + if err := s.EmojiAdd(guildID, e); err != nil { + return err + } + } + return nil +} + +// MessageAdd adds a message to the current world state, or updates it if it exists. +// If the channel cannot be found, the message is discarded. +// Messages are kept in state up to s.MaxMessageCount +func (s *State) MessageAdd(message *Message) error { + if s == nil { + return ErrNilState + } + + c, err := s.Channel(message.ChannelID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + // If the message exists, merge in the new message contents. + for _, m := range c.Messages { + if m.ID == message.ID { + if message.Content != "" { + m.Content = message.Content + } + if message.EditedTimestamp != "" { + m.EditedTimestamp = message.EditedTimestamp + } + if message.Mentions != nil { + m.Mentions = message.Mentions + } + if message.Embeds != nil { + m.Embeds = message.Embeds + } + if message.Attachments != nil { + m.Attachments = message.Attachments + } + if message.Timestamp != "" { + m.Timestamp = message.Timestamp + } + if message.Author != nil { + m.Author = message.Author + } + + return nil + } + } + + c.Messages = append(c.Messages, message) + + if len(c.Messages) > s.MaxMessageCount { + c.Messages = c.Messages[len(c.Messages)-s.MaxMessageCount:] + } + return nil +} + +// MessageRemove removes a message from the world state. +func (s *State) MessageRemove(message *Message) error { + if s == nil { + return ErrNilState + } + + return s.messageRemoveByID(message.ChannelID, message.ID) +} + +// messageRemoveByID removes a message by channelID and messageID from the world state. +func (s *State) messageRemoveByID(channelID, messageID string) error { + c, err := s.Channel(channelID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + for i, m := range c.Messages { + if m.ID == messageID { + c.Messages = append(c.Messages[:i], c.Messages[i+1:]...) + return nil + } + } + + return ErrStateNotFound +} + +func (s *State) voiceStateUpdate(update *VoiceStateUpdate) error { + guild, err := s.Guild(update.GuildID) + if err != nil { + return err + } + + s.Lock() + defer s.Unlock() + + // Handle Leaving Channel + if update.ChannelID == "" { + for i, state := range guild.VoiceStates { + if state.UserID == update.UserID { + guild.VoiceStates = append(guild.VoiceStates[:i], guild.VoiceStates[i+1:]...) + return nil + } + } + } else { + for i, state := range guild.VoiceStates { + if state.UserID == update.UserID { + guild.VoiceStates[i] = update.VoiceState + return nil + } + } + + guild.VoiceStates = append(guild.VoiceStates, update.VoiceState) + } + + return nil +} + +// Message gets a message by channel and message ID. 
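+//
+// Messages are only cached while MaxMessageCount is above zero; a sketch,
+// with placeholder IDs:
+//
+//	session.State.MaxMessageCount = 100
+//	m, err := session.State.Message(channelID, messageID)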
+func (s *State) Message(channelID, messageID string) (*Message, error) { + if s == nil { + return nil, ErrNilState + } + + c, err := s.Channel(channelID) + if err != nil { + return nil, err + } + + s.RLock() + defer s.RUnlock() + + for _, m := range c.Messages { + if m.ID == messageID { + return m, nil + } + } + + return nil, ErrStateNotFound +} + +// OnReady takes a Ready event and updates all internal state. +func (s *State) onReady(se *Session, r *Ready) (err error) { + if s == nil { + return ErrNilState + } + + s.Lock() + defer s.Unlock() + + // We must track at least the current user for Voice, even + // if state is disabled, store the bare essentials. + if !se.StateEnabled { + ready := Ready{ + Version: r.Version, + SessionID: r.SessionID, + User: r.User, + } + + s.Ready = ready + + return nil + } + + s.Ready = *r + + for _, g := range s.Guilds { + s.guildMap[g.ID] = g + + for _, c := range g.Channels { + s.channelMap[c.ID] = c + } + } + + for _, c := range s.PrivateChannels { + s.channelMap[c.ID] = c + } + + return nil +} + +// onInterface handles all events related to states. +func (s *State) onInterface(se *Session, i interface{}) (err error) { + if s == nil { + return ErrNilState + } + + r, ok := i.(*Ready) + if ok { + return s.onReady(se, r) + } + + if !se.StateEnabled { + return nil + } + + switch t := i.(type) { + case *GuildCreate: + err = s.GuildAdd(t.Guild) + case *GuildUpdate: + err = s.GuildAdd(t.Guild) + case *GuildDelete: + err = s.GuildRemove(t.Guild) + case *GuildMemberAdd: + if s.TrackMembers { + err = s.MemberAdd(t.Member) + } + case *GuildMemberUpdate: + if s.TrackMembers { + err = s.MemberAdd(t.Member) + } + case *GuildMemberRemove: + if s.TrackMembers { + err = s.MemberRemove(t.Member) + } + case *GuildRoleCreate: + if s.TrackRoles { + err = s.RoleAdd(t.GuildID, t.Role) + } + case *GuildRoleUpdate: + if s.TrackRoles { + err = s.RoleAdd(t.GuildID, t.Role) + } + case *GuildRoleDelete: + if s.TrackRoles { + err = s.RoleRemove(t.GuildID, t.RoleID) + } + case *GuildEmojisUpdate: + if s.TrackEmojis { + err = s.EmojisAdd(t.GuildID, t.Emojis) + } + case *ChannelCreate: + if s.TrackChannels { + err = s.ChannelAdd(t.Channel) + } + case *ChannelUpdate: + if s.TrackChannels { + err = s.ChannelAdd(t.Channel) + } + case *ChannelDelete: + if s.TrackChannels { + err = s.ChannelRemove(t.Channel) + } + case *MessageCreate: + if s.MaxMessageCount != 0 { + err = s.MessageAdd(t.Message) + } + case *MessageUpdate: + if s.MaxMessageCount != 0 { + err = s.MessageAdd(t.Message) + } + case *MessageDelete: + if s.MaxMessageCount != 0 { + err = s.MessageRemove(t.Message) + } + case *MessageDeleteBulk: + if s.MaxMessageCount != 0 { + for _, mID := range t.Messages { + s.messageRemoveByID(t.ChannelID, mID) + } + } + case *VoiceStateUpdate: + if s.TrackVoice { + err = s.voiceStateUpdate(t) + } + case *PresenceUpdate: + if s.TrackPresences { + s.PresenceAdd(t.GuildID, &t.Presence) + } + if s.TrackMembers { + if t.Status == StatusOffline { + return + } + + var m *Member + m, err = s.Member(t.GuildID, t.User.ID) + + if err != nil { + // Member not found; this is a user coming online + m = &Member{ + GuildID: t.GuildID, + Nick: t.Nick, + User: t.User, + Roles: t.Roles, + } + + } else { + + if t.Nick != "" { + m.Nick = t.Nick + } + + if t.User.Username != "" { + m.User.Username = t.User.Username + } + + // PresenceUpdates always contain a list of roles, so there's no need to check for an empty list here + m.Roles = t.Roles + + } + + err = s.MemberAdd(m) + } + + } + + return +} + +// 
UserChannelPermissions returns the permission of a user in a channel.
+// userID    : The ID of the user to calculate permissions for.
+// channelID : The ID of the channel to calculate permission for.
+func (s *State) UserChannelPermissions(userID, channelID string) (apermissions int, err error) {
+	if s == nil {
+		return 0, ErrNilState
+	}
+
+	channel, err := s.Channel(channelID)
+	if err != nil {
+		return
+	}
+
+	guild, err := s.Guild(channel.GuildID)
+	if err != nil {
+		return
+	}
+
+	if userID == guild.OwnerID {
+		apermissions = PermissionAll
+		return
+	}
+
+	member, err := s.Member(guild.ID, userID)
+	if err != nil {
+		return
+	}
+
+	return memberPermissions(guild, channel, member), nil
+}
+
+// UserColor returns the color of a user in a channel.
+// While colors are defined at a Guild level, determining them per channel is more useful in message handlers.
+// 0 is returned in cases of error, which is the color of @everyone.
+// userID    : The ID of the user to calculate the color for.
+// channelID : The ID of the channel to calculate the color for.
+func (s *State) UserColor(userID, channelID string) int {
+	if s == nil {
+		return 0
+	}
+
+	channel, err := s.Channel(channelID)
+	if err != nil {
+		return 0
+	}
+
+	guild, err := s.Guild(channel.GuildID)
+	if err != nil {
+		return 0
+	}
+
+	member, err := s.Member(guild.ID, userID)
+	if err != nil {
+		return 0
+	}
+
+	roles := Roles(guild.Roles)
+	sort.Sort(roles)
+
+	for _, role := range roles {
+		for _, roleID := range member.Roles {
+			if role.ID == roleID {
+				if role.Color != 0 {
+					return role.Color
+				}
+			}
+		}
+	}
+
+	return 0
+}
diff --git a/vendor/github.com/bwmarrin/discordgo/structs.go b/vendor/github.com/bwmarrin/discordgo/structs.go
new file mode 100644
index 0000000..32f435c
--- /dev/null
+++ b/vendor/github.com/bwmarrin/discordgo/structs.go
@@ -0,0 +1,581 @@
+// Discordgo - Discord bindings for Go
+// Available at https://github.com/bwmarrin/discordgo
+
+// Copyright 2015-2016 Bruce Marriner. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains all structures for the discordgo package. These
+// may be moved about later into separate files but I find it easier to have
+// them all located together.
+
+package discordgo
+
+import (
+	"encoding/json"
+	"net/http"
+	"strconv"
+	"sync"
+	"time"
+
+	"github.com/gorilla/websocket"
+)
+
+// A Session represents a connection to the Discord API.
+type Session struct {
+	sync.RWMutex
+
+	// General configurable settings.
+
+	// Authentication token for this session
+	Token string
+	MFA   bool
+
+	// Debug for printing JSON request/responses
+	Debug    bool // Deprecated, will be removed.
+	LogLevel int
+
+	// Should the session reconnect the websocket on errors.
+	ShouldReconnectOnError bool
+
+	// Should the session request compressed websocket data.
+	Compress bool
+
+	// Sharding
+	ShardID    int
+	ShardCount int
+
+	// Should state tracking be enabled.
+	// State tracking is the best way for getting the user's
+	// active guilds and the members of the guilds.
+	StateEnabled bool
+
+	// Exposed but should not be modified by User.
+
+	// Whether the Data Websocket is ready
+	DataReady bool // NOTE: May be deprecated soon
+
+	// Max number of REST API retries
+	MaxRestRetries int
+
+	// Status stores the current status of the websocket connection
+	// this is being tested, may stay, may go away.
+	status int32
+
+	// Whether the Voice Websocket is ready
+	VoiceReady bool // NOTE: Deprecated.
+ + // Whether the UDP Connection is ready + UDPReady bool // NOTE: Deprecated + + // Stores a mapping of guild id's to VoiceConnections + VoiceConnections map[string]*VoiceConnection + + // Managed state object, updated internally with events when + // StateEnabled is true. + State *State + + // The http client used for REST requests + Client *http.Client + + // Event handlers + handlersMu sync.RWMutex + handlers map[string][]*eventHandlerInstance + onceHandlers map[string][]*eventHandlerInstance + + // The websocket connection. + wsConn *websocket.Conn + + // When nil, the session is not listening. + listening chan interface{} + + // used to deal with rate limits + ratelimiter *RateLimiter + + // sequence tracks the current gateway api websocket sequence number + sequence *int64 + + // stores sessions current Discord Gateway + gateway string + + // stores session ID of current Gateway connection + sessionID string + + // used to make sure gateway websocket writes do not happen concurrently + wsMutex sync.Mutex +} + +// A VoiceRegion stores data for a specific voice region server. +type VoiceRegion struct { + ID string `json:"id"` + Name string `json:"name"` + Hostname string `json:"sample_hostname"` + Port int `json:"sample_port"` +} + +// A VoiceICE stores data for voice ICE servers. +type VoiceICE struct { + TTL string `json:"ttl"` + Servers []*ICEServer `json:"servers"` +} + +// A ICEServer stores data for a specific voice ICE server. +type ICEServer struct { + URL string `json:"url"` + Username string `json:"username"` + Credential string `json:"credential"` +} + +// A Invite stores all data related to a specific Discord Guild or Channel invite. +type Invite struct { + Guild *Guild `json:"guild"` + Channel *Channel `json:"channel"` + Inviter *User `json:"inviter"` + Code string `json:"code"` + CreatedAt Timestamp `json:"created_at"` + MaxAge int `json:"max_age"` + Uses int `json:"uses"` + MaxUses int `json:"max_uses"` + XkcdPass string `json:"xkcdpass"` + Revoked bool `json:"revoked"` + Temporary bool `json:"temporary"` +} + +// A Channel holds all data related to an individual Discord channel. +type Channel struct { + ID string `json:"id"` + GuildID string `json:"guild_id"` + Name string `json:"name"` + Topic string `json:"topic"` + Type string `json:"type"` + LastMessageID string `json:"last_message_id"` + Position int `json:"position"` + Bitrate int `json:"bitrate"` + IsPrivate bool `json:"is_private"` + Recipient *User `json:"recipient"` + Messages []*Message `json:"-"` + PermissionOverwrites []*PermissionOverwrite `json:"permission_overwrites"` +} + +// A PermissionOverwrite holds permission overwrite data for a Channel +type PermissionOverwrite struct { + ID string `json:"id"` + Type string `json:"type"` + Deny int `json:"deny"` + Allow int `json:"allow"` +} + +// Emoji struct holds data related to Emoji's +type Emoji struct { + ID string `json:"id"` + Name string `json:"name"` + Roles []string `json:"roles"` + Managed bool `json:"managed"` + RequireColons bool `json:"require_colons"` +} + +// APIName returns an correctly formatted API name for use in the MessageReactions endpoints. 
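+//
+// For example, a guild emoji with ID "1234" and name "party" yields
+// "party:1234", while a plain unicode emoji is returned unchanged:
+//
+//	e := Emoji{ID: "1234", Name: "party"}
+//	name := e.APIName() // "party:1234"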
+func (e *Emoji) APIName() string { + if e.ID != "" && e.Name != "" { + return e.Name + ":" + e.ID + } + if e.Name != "" { + return e.Name + } + return e.ID +} + +// VerificationLevel type defination +type VerificationLevel int + +// Constants for VerificationLevel levels from 0 to 3 inclusive +const ( + VerificationLevelNone VerificationLevel = iota + VerificationLevelLow + VerificationLevelMedium + VerificationLevelHigh +) + +// A Guild holds all data related to a specific Discord Guild. Guilds are also +// sometimes referred to as Servers in the Discord client. +type Guild struct { + ID string `json:"id"` + Name string `json:"name"` + Icon string `json:"icon"` + Region string `json:"region"` + AfkChannelID string `json:"afk_channel_id"` + EmbedChannelID string `json:"embed_channel_id"` + OwnerID string `json:"owner_id"` + JoinedAt Timestamp `json:"joined_at"` + Splash string `json:"splash"` + AfkTimeout int `json:"afk_timeout"` + MemberCount int `json:"member_count"` + VerificationLevel VerificationLevel `json:"verification_level"` + EmbedEnabled bool `json:"embed_enabled"` + Large bool `json:"large"` // ?? + DefaultMessageNotifications int `json:"default_message_notifications"` + Roles []*Role `json:"roles"` + Emojis []*Emoji `json:"emojis"` + Members []*Member `json:"members"` + Presences []*Presence `json:"presences"` + Channels []*Channel `json:"channels"` + VoiceStates []*VoiceState `json:"voice_states"` + Unavailable bool `json:"unavailable"` +} + +// A UserGuild holds a brief version of a Guild +type UserGuild struct { + ID string `json:"id"` + Name string `json:"name"` + Icon string `json:"icon"` + Owner bool `json:"owner"` + Permissions int `json:"permissions"` +} + +// A GuildParams stores all the data needed to update discord guild settings +type GuildParams struct { + Name string `json:"name,omitempty"` + Region string `json:"region,omitempty"` + VerificationLevel *VerificationLevel `json:"verification_level,omitempty"` + DefaultMessageNotifications int `json:"default_message_notifications,omitempty"` // TODO: Separate type? + AfkChannelID string `json:"afk_channel_id,omitempty"` + AfkTimeout int `json:"afk_timeout,omitempty"` + Icon string `json:"icon,omitempty"` + OwnerID string `json:"owner_id,omitempty"` + Splash string `json:"splash,omitempty"` +} + +// A Role stores information about Discord guild member roles. +type Role struct { + ID string `json:"id"` + Name string `json:"name"` + Managed bool `json:"managed"` + Mentionable bool `json:"mentionable"` + Hoist bool `json:"hoist"` + Color int `json:"color"` + Position int `json:"position"` + Permissions int `json:"permissions"` +} + +// Roles are a collection of Role +type Roles []*Role + +func (r Roles) Len() int { + return len(r) +} + +func (r Roles) Less(i, j int) bool { + return r[i].Position > r[j].Position +} + +func (r Roles) Swap(i, j int) { + r[i], r[j] = r[j], r[i] +} + +// A VoiceState stores the voice states of Guilds +type VoiceState struct { + UserID string `json:"user_id"` + SessionID string `json:"session_id"` + ChannelID string `json:"channel_id"` + GuildID string `json:"guild_id"` + Suppress bool `json:"suppress"` + SelfMute bool `json:"self_mute"` + SelfDeaf bool `json:"self_deaf"` + Mute bool `json:"mute"` + Deaf bool `json:"deaf"` +} + +// A Presence stores the online, offline, or idle and game status of Guild members. 
+type Presence struct { + User *User `json:"user"` + Status Status `json:"status"` + Game *Game `json:"game"` + Nick string `json:"nick"` + Roles []string `json:"roles"` +} + +// A Game struct holds the name of the "playing .." game for a user +type Game struct { + Name string `json:"name"` + Type int `json:"type"` + URL string `json:"url"` +} + +// UnmarshalJSON unmarshals json to Game struct +func (g *Game) UnmarshalJSON(bytes []byte) error { + temp := &struct { + Name json.Number `json:"name"` + Type json.RawMessage `json:"type"` + URL string `json:"url"` + }{} + err := json.Unmarshal(bytes, temp) + if err != nil { + return err + } + g.URL = temp.URL + g.Name = temp.Name.String() + + if temp.Type != nil { + err = json.Unmarshal(temp.Type, &g.Type) + if err == nil { + return nil + } + + s := "" + err = json.Unmarshal(temp.Type, &s) + if err == nil { + g.Type, err = strconv.Atoi(s) + } + + return err + } + + return nil +} + +// A Member stores user information for Guild members. +type Member struct { + GuildID string `json:"guild_id"` + JoinedAt string `json:"joined_at"` + Nick string `json:"nick"` + Deaf bool `json:"deaf"` + Mute bool `json:"mute"` + User *User `json:"user"` + Roles []string `json:"roles"` +} + +// A Settings stores data for a specific users Discord client settings. +type Settings struct { + RenderEmbeds bool `json:"render_embeds"` + InlineEmbedMedia bool `json:"inline_embed_media"` + InlineAttachmentMedia bool `json:"inline_attachment_media"` + EnableTtsCommand bool `json:"enable_tts_command"` + MessageDisplayCompact bool `json:"message_display_compact"` + ShowCurrentGame bool `json:"show_current_game"` + ConvertEmoticons bool `json:"convert_emoticons"` + Locale string `json:"locale"` + Theme string `json:"theme"` + GuildPositions []string `json:"guild_positions"` + RestrictedGuilds []string `json:"restricted_guilds"` + FriendSourceFlags *FriendSourceFlags `json:"friend_source_flags"` + Status Status `json:"status"` + DetectPlatformAccounts bool `json:"detect_platform_accounts"` + DeveloperMode bool `json:"developer_mode"` +} + +// Status type defination +type Status string + +// Constants for Status with the different current available status +const ( + StatusOnline Status = "online" + StatusIdle Status = "idle" + StatusDoNotDisturb Status = "dnd" + StatusInvisible Status = "invisible" + StatusOffline Status = "offline" +) + +// FriendSourceFlags stores ... TODO :) +type FriendSourceFlags struct { + All bool `json:"all"` + MutualGuilds bool `json:"mutual_guilds"` + MutualFriends bool `json:"mutual_friends"` +} + +// A Relationship between the logged in user and Relationship.User +type Relationship struct { + User *User `json:"user"` + Type int `json:"type"` // 1 = friend, 2 = blocked, 3 = incoming friend req, 4 = sent friend req + ID string `json:"id"` +} + +// A TooManyRequests struct holds information received from Discord +// when receiving a HTTP 429 response. +type TooManyRequests struct { + Bucket string `json:"bucket"` + Message string `json:"message"` + RetryAfter time.Duration `json:"retry_after"` +} + +// A ReadState stores data on the read state of channels. +type ReadState struct { + MentionCount int `json:"mention_count"` + LastMessageID string `json:"last_message_id"` + ID string `json:"id"` +} + +// An Ack is used to ack messages +type Ack struct { + Token string `json:"token"` +} + +// A GuildRole stores data for guild roles. 
+type GuildRole struct { + Role *Role `json:"role"` + GuildID string `json:"guild_id"` +} + +// A GuildBan stores data for a guild ban. +type GuildBan struct { + Reason string `json:"reason"` + User *User `json:"user"` +} + +// A GuildIntegration stores data for a guild integration. +type GuildIntegration struct { + ID string `json:"id"` + Name string `json:"name"` + Type string `json:"type"` + Enabled bool `json:"enabled"` + Syncing bool `json:"syncing"` + RoleID string `json:"role_id"` + ExpireBehavior int `json:"expire_behavior"` + ExpireGracePeriod int `json:"expire_grace_period"` + User *User `json:"user"` + Account *GuildIntegrationAccount `json:"account"` + SyncedAt int `json:"synced_at"` +} + +// A GuildIntegrationAccount stores data for a guild integration account. +type GuildIntegrationAccount struct { + ID string `json:"id"` + Name string `json:"name"` +} + +// A GuildEmbed stores data for a guild embed. +type GuildEmbed struct { + Enabled bool `json:"enabled"` + ChannelID string `json:"channel_id"` +} + +// A UserGuildSettingsChannelOverride stores data for a channel override for a users guild settings. +type UserGuildSettingsChannelOverride struct { + Muted bool `json:"muted"` + MessageNotifications int `json:"message_notifications"` + ChannelID string `json:"channel_id"` +} + +// A UserGuildSettings stores data for a users guild settings. +type UserGuildSettings struct { + SupressEveryone bool `json:"suppress_everyone"` + Muted bool `json:"muted"` + MobilePush bool `json:"mobile_push"` + MessageNotifications int `json:"message_notifications"` + GuildID string `json:"guild_id"` + ChannelOverrides []*UserGuildSettingsChannelOverride `json:"channel_overrides"` +} + +// A UserGuildSettingsEdit stores data for editing UserGuildSettings +type UserGuildSettingsEdit struct { + SupressEveryone bool `json:"suppress_everyone"` + Muted bool `json:"muted"` + MobilePush bool `json:"mobile_push"` + MessageNotifications int `json:"message_notifications"` + ChannelOverrides map[string]*UserGuildSettingsChannelOverride `json:"channel_overrides"` +} + +// An APIErrorMessage is an api error message returned from discord +type APIErrorMessage struct { + Code int `json:"code"` + Message string `json:"message"` +} + +// Webhook stores the data for a webhook. +type Webhook struct { + ID string `json:"id"` + GuildID string `json:"guild_id"` + ChannelID string `json:"channel_id"` + User *User `json:"user"` + Name string `json:"name"` + Avatar string `json:"avatar"` + Token string `json:"token"` +} + +// WebhookParams is a struct for webhook params, used in the WebhookExecute command. +type WebhookParams struct { + Content string `json:"content,omitempty"` + Username string `json:"username,omitempty"` + AvatarURL string `json:"avatar_url,omitempty"` + TTS bool `json:"tts,omitempty"` + File string `json:"file,omitempty"` + Embeds []*MessageEmbed `json:"embeds,omitempty"` +} + +// MessageReaction stores the data for a message reaction. 
+type MessageReaction struct { + UserID string `json:"user_id"` + MessageID string `json:"message_id"` + Emoji Emoji `json:"emoji"` + ChannelID string `json:"channel_id"` +} + +// Constants for the different bit offsets of text channel permissions +const ( + PermissionReadMessages = 1 << (iota + 10) + PermissionSendMessages + PermissionSendTTSMessages + PermissionManageMessages + PermissionEmbedLinks + PermissionAttachFiles + PermissionReadMessageHistory + PermissionMentionEveryone + PermissionUseExternalEmojis +) + +// Constants for the different bit offsets of voice permissions +const ( + PermissionVoiceConnect = 1 << (iota + 20) + PermissionVoiceSpeak + PermissionVoiceMuteMembers + PermissionVoiceDeafenMembers + PermissionVoiceMoveMembers + PermissionVoiceUseVAD +) + +// Constants for general management. +const ( + PermissionChangeNickname = 1 << (iota + 26) + PermissionManageNicknames + PermissionManageRoles + PermissionManageWebhooks + PermissionManageEmojis +) + +// Constants for the different bit offsets of general permissions +const ( + PermissionCreateInstantInvite = 1 << iota + PermissionKickMembers + PermissionBanMembers + PermissionAdministrator + PermissionManageChannels + PermissionManageServer + PermissionAddReactions + PermissionViewAuditLogs + + PermissionAllText = PermissionReadMessages | + PermissionSendMessages | + PermissionSendTTSMessages | + PermissionManageMessages | + PermissionEmbedLinks | + PermissionAttachFiles | + PermissionReadMessageHistory | + PermissionMentionEveryone + PermissionAllVoice = PermissionVoiceConnect | + PermissionVoiceSpeak | + PermissionVoiceMuteMembers | + PermissionVoiceDeafenMembers | + PermissionVoiceMoveMembers | + PermissionVoiceUseVAD + PermissionAllChannel = PermissionAllText | + PermissionAllVoice | + PermissionCreateInstantInvite | + PermissionManageRoles | + PermissionManageChannels | + PermissionAddReactions | + PermissionViewAuditLogs + PermissionAll = PermissionAllChannel | + PermissionKickMembers | + PermissionBanMembers | + PermissionManageServer | + PermissionAdministrator +) diff --git a/vendor/github.com/bwmarrin/discordgo/types.go b/vendor/github.com/bwmarrin/discordgo/types.go new file mode 100644 index 0000000..780b6bb --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/types.go @@ -0,0 +1,58 @@ +// Discordgo - Discord bindings for Go +// Available at https://github.com/bwmarrin/discordgo + +// Copyright 2015-2016 Bruce Marriner . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains custom types, currently only a timestamp wrapper. + +package discordgo + +import ( + "encoding/json" + "fmt" + "net/http" + "time" +) + +// Timestamp stores a timestamp, as sent by the Discord API. +type Timestamp string + +// Parse parses a timestamp string into a time.Time object. +// The only time this can fail is if Discord changes their timestamp format. +func (t Timestamp) Parse() (time.Time, error) { + return time.Parse(time.RFC3339, string(t)) +} + +// RESTError stores error information about a request with a bad response code. +// Message is not always present, there are cases where api calls can fail +// without returning a json message. +type RESTError struct { + Request *http.Request + Response *http.Response + ResponseBody []byte + + Message *APIErrorMessage // Message may be nil. 
+} + +func newRestError(req *http.Request, resp *http.Response, body []byte) *RESTError { + restErr := &RESTError{ + Request: req, + Response: resp, + ResponseBody: body, + } + + // Attempt to decode the error and assume no message was provided if it fails + var msg *APIErrorMessage + err := json.Unmarshal(body, &msg) + if err == nil { + restErr.Message = msg + } + + return restErr +} + +func (r RESTError) Error() string { + return fmt.Sprintf("HTTP %s, %s", r.Response.Status, r.ResponseBody) +} diff --git a/vendor/github.com/bwmarrin/discordgo/user.go b/vendor/github.com/bwmarrin/discordgo/user.go new file mode 100644 index 0000000..b3a7e4b --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/user.go @@ -0,0 +1,26 @@ +package discordgo + +import "fmt" + +// A User stores all data for an individual Discord user. +type User struct { + ID string `json:"id"` + Email string `json:"email"` + Username string `json:"username"` + Avatar string `json:"avatar"` + Discriminator string `json:"discriminator"` + Token string `json:"token"` + Verified bool `json:"verified"` + MFAEnabled bool `json:"mfa_enabled"` + Bot bool `json:"bot"` +} + +// String returns a unique identifier of the form username#discriminator +func (u *User) String() string { + return fmt.Sprintf("%s#%s", u.Username, u.Discriminator) +} + +// Mention return a string which mentions the user +func (u *User) Mention() string { + return fmt.Sprintf("<@%s>", u.ID) +} diff --git a/vendor/github.com/bwmarrin/discordgo/voice.go b/vendor/github.com/bwmarrin/discordgo/voice.go new file mode 100644 index 0000000..da7b8c9 --- /dev/null +++ b/vendor/github.com/bwmarrin/discordgo/voice.go @@ -0,0 +1,878 @@ +// Discordgo - Discord bindings for Go +// Available at https://github.com/bwmarrin/discordgo + +// Copyright 2015-2016 Bruce Marriner . All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// This file contains code related to Discord voice suppport + +package discordgo + +import ( + "encoding/binary" + "encoding/json" + "fmt" + "log" + "net" + "strings" + "sync" + "time" + + "github.com/gorilla/websocket" + "golang.org/x/crypto/nacl/secretbox" +) + +// ------------------------------------------------------------------------------------------------ +// Code related to both VoiceConnection Websocket and UDP connections. +// ------------------------------------------------------------------------------------------------ + +// A VoiceConnection struct holds all the data and functions related to a Discord Voice Connection. 
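+//
+// A rough lifecycle sketch, assuming session is a connected *Session and
+// the IDs are placeholders:
+//
+//	vc, err := session.ChannelVoiceJoin(guildID, channelID, false, true)
+//	if err == nil {
+//		vc.Speaking(true)
+//		// send pre-encoded opus frames on vc.OpusSend here
+//		vc.Speaking(false)
+//		vc.Disconnect()
+//	}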
+type VoiceConnection struct { + sync.RWMutex + + Debug bool // If true, print extra logging -- DEPRECATED + LogLevel int + Ready bool // If true, voice is ready to send/receive audio + UserID string + GuildID string + ChannelID string + deaf bool + mute bool + speaking bool + reconnecting bool // If true, voice connection is trying to reconnect + + OpusSend chan []byte // Chan for sending opus audio + OpusRecv chan *Packet // Chan for receiving opus audio + + wsConn *websocket.Conn + wsMutex sync.Mutex + udpConn *net.UDPConn + session *Session + + sessionID string + token string + endpoint string + + // Used to send a close signal to goroutines + close chan struct{} + + // Used to allow blocking until connected + connected chan bool + + // Used to pass the sessionid from onVoiceStateUpdate + // sessionRecv chan string UNUSED ATM + + op4 voiceOP4 + op2 voiceOP2 + + voiceSpeakingUpdateHandlers []VoiceSpeakingUpdateHandler +} + +// VoiceSpeakingUpdateHandler type provides a function defination for the +// VoiceSpeakingUpdate event +type VoiceSpeakingUpdateHandler func(vc *VoiceConnection, vs *VoiceSpeakingUpdate) + +// Speaking sends a speaking notification to Discord over the voice websocket. +// This must be sent as true prior to sending audio and should be set to false +// once finished sending audio. +// b : Send true if speaking, false if not. +func (v *VoiceConnection) Speaking(b bool) (err error) { + + v.log(LogDebug, "called (%t)", b) + + type voiceSpeakingData struct { + Speaking bool `json:"speaking"` + Delay int `json:"delay"` + } + + type voiceSpeakingOp struct { + Op int `json:"op"` // Always 5 + Data voiceSpeakingData `json:"d"` + } + + if v.wsConn == nil { + return fmt.Errorf("no VoiceConnection websocket") + } + + data := voiceSpeakingOp{5, voiceSpeakingData{b, 0}} + v.wsMutex.Lock() + err = v.wsConn.WriteJSON(data) + v.wsMutex.Unlock() + + v.Lock() + defer v.Unlock() + if err != nil { + v.speaking = false + log.Println("Speaking() write json error:", err) + return + } + + v.speaking = b + + return +} + +// ChangeChannel sends Discord a request to change channels within a Guild +// !!! NOTE !!! This function may be removed in favour of just using ChannelVoiceJoin +func (v *VoiceConnection) ChangeChannel(channelID string, mute, deaf bool) (err error) { + + v.log(LogInformational, "called") + + data := voiceChannelJoinOp{4, voiceChannelJoinData{&v.GuildID, &channelID, mute, deaf}} + v.wsMutex.Lock() + err = v.session.wsConn.WriteJSON(data) + v.wsMutex.Unlock() + if err != nil { + return + } + v.ChannelID = channelID + v.deaf = deaf + v.mute = mute + v.speaking = false + + return +} + +// Disconnect disconnects from this voice channel and closes the websocket +// and udp connections to Discord. +// !!! NOTE !!! 
this function may be removed in favour of ChannelVoiceLeave
+func (v *VoiceConnection) Disconnect() (err error) {
+
+	// Send an OP4 with a nil channel to disconnect
+	if v.sessionID != "" {
+		data := voiceChannelJoinOp{4, voiceChannelJoinData{&v.GuildID, nil, true, true}}
+		v.session.wsMutex.Lock()
+		err = v.session.wsConn.WriteJSON(data)
+		v.session.wsMutex.Unlock()
+		v.sessionID = ""
+	}
+
+	// Close websocket and udp connections
+	v.Close()
+
+	v.log(LogInformational, "Deleting VoiceConnection %s", v.GuildID)
+
+	v.session.Lock()
+	delete(v.session.VoiceConnections, v.GuildID)
+	v.session.Unlock()
+
+	return
+}
+
+// Close closes the voice ws and udp connections
+func (v *VoiceConnection) Close() {
+
+	v.log(LogInformational, "called")
+
+	v.Lock()
+	defer v.Unlock()
+
+	v.Ready = false
+	v.speaking = false
+
+	if v.close != nil {
+		v.log(LogInformational, "closing v.close")
+		close(v.close)
+		v.close = nil
+	}
+
+	if v.udpConn != nil {
+		v.log(LogInformational, "closing udp")
+		err := v.udpConn.Close()
+		if err != nil {
+			log.Println("error closing udp connection: ", err)
+		}
+		v.udpConn = nil
+	}
+
+	if v.wsConn != nil {
+		v.log(LogInformational, "sending close frame")
+
+		// To cleanly close a connection, a client should send a close
+		// frame and wait for the server to close the connection.
+		v.wsMutex.Lock()
+		err := v.wsConn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
+		v.wsMutex.Unlock()
+		if err != nil {
+			v.log(LogError, "error closing websocket, %s", err)
+		}
+
+		// TODO: Wait for Discord to actually close the connection.
+		time.Sleep(1 * time.Second)
+
+		v.log(LogInformational, "closing websocket")
+		err = v.wsConn.Close()
+		if err != nil {
+			v.log(LogError, "error closing websocket, %s", err)
+		}
+
+		v.wsConn = nil
+	}
+}
+
+// AddHandler adds a Handler for VoiceSpeakingUpdate events.
+func (v *VoiceConnection) AddHandler(h VoiceSpeakingUpdateHandler) {
+	v.Lock()
+	defer v.Unlock()
+
+	v.voiceSpeakingUpdateHandlers = append(v.voiceSpeakingUpdateHandlers, h)
+}
+
+// VoiceSpeakingUpdate is a struct for a VoiceSpeakingUpdate event.
+type VoiceSpeakingUpdate struct {
+	UserID   string `json:"user_id"`
+	SSRC     int    `json:"ssrc"`
+	Speaking bool   `json:"speaking"`
+}
+
+// ------------------------------------------------------------------------------------------------
+// Unexported Internal Functions Below.
+// ------------------------------------------------------------------------------------------------
+
+// A voiceOP4 stores the data for the voice operation 4 websocket event
+// which provides us with the NaCl SecretBox encryption key
+type voiceOP4 struct {
+	SecretKey [32]byte `json:"secret_key"`
+	Mode      string   `json:"mode"`
+}
+
+// A voiceOP2 stores the data for the voice operation 2 websocket event
+// which is sort of like the voice READY packet
+type voiceOP2 struct {
+	SSRC              uint32        `json:"ssrc"`
+	Port              int           `json:"port"`
+	Modes             []string      `json:"modes"`
+	HeartbeatInterval time.Duration `json:"heartbeat_interval"`
+}
+
+// waitUntilConnected waits for the voice connection to
+// become ready. If it does not become ready in time, it returns an error.
+func (v *VoiceConnection) waitUntilConnected() error {
+
+	v.log(LogInformational, "called")
+
+	i := 0
+	for {
+		v.RLock()
+		ready := v.Ready
+		v.RUnlock()
+		if ready {
+			return nil
+		}
+
+		if i > 10 {
+			return fmt.Errorf("timeout waiting for voice")
+		}
+
+		time.Sleep(1 * time.Second)
+		i++
+	}
+}
+
+// Open opens a voice connection. 
This should be called
+// after VoiceChannelJoin is used and the data VOICE websocket events
+// are captured.
+func (v *VoiceConnection) open() (err error) {
+
+	v.log(LogInformational, "called")
+
+	v.Lock()
+	defer v.Unlock()
+
+	// Don't open a websocket if one is already open
+	if v.wsConn != nil {
+		v.log(LogWarning, "refusing to overwrite non-nil websocket")
+		return
+	}
+
+	// TODO temp? loop to wait for the SessionID
+	i := 0
+	for {
+		if v.sessionID != "" {
+			break
+		}
+		if i > 20 { // only loop for up to 1 second total
+			return fmt.Errorf("did not receive voice Session ID in time")
+		}
+		time.Sleep(50 * time.Millisecond)
+		i++
+	}
+
+	// Connect to VoiceConnection Websocket
+	vg := fmt.Sprintf("wss://%s", strings.TrimSuffix(v.endpoint, ":80"))
+	v.log(LogInformational, "connecting to voice endpoint %s", vg)
+	v.wsConn, _, err = websocket.DefaultDialer.Dial(vg, nil)
+	if err != nil {
+		v.log(LogWarning, "error connecting to voice endpoint %s, %s", vg, err)
+		v.log(LogDebug, "voice struct: %#v\n", v)
+		return
+	}
+
+	type voiceHandshakeData struct {
+		ServerID  string `json:"server_id"`
+		UserID    string `json:"user_id"`
+		SessionID string `json:"session_id"`
+		Token     string `json:"token"`
+	}
+	type voiceHandshakeOp struct {
+		Op   int                `json:"op"` // Always 0
+		Data voiceHandshakeData `json:"d"`
+	}
+	data := voiceHandshakeOp{0, voiceHandshakeData{v.GuildID, v.UserID, v.sessionID, v.token}}
+
+	err = v.wsConn.WriteJSON(data)
+	if err != nil {
+		v.log(LogWarning, "error sending init packet, %s", err)
+		return
+	}
+
+	v.close = make(chan struct{})
+	go v.wsListen(v.wsConn, v.close)
+
+	// add loop/check for Ready bool here?
+	// then return false if not ready?
+	// but then wsListen will also err.
+
+	return
+}
+
+// wsListen listens on the voice websocket for messages and passes them
+// to the voice event handler. This is automatically called by the open func
+func (v *VoiceConnection) wsListen(wsConn *websocket.Conn, close <-chan struct{}) {
+
+	v.log(LogInformational, "called")
+
+	for {
+		_, message, err := v.wsConn.ReadMessage()
+		if err != nil {
+			// Detect if we have been closed manually. If a Close() has already
+			// happened, the websocket we are listening on will be different to the
+			// current session.
+			v.RLock()
+			sameConnection := v.wsConn == wsConn
+			v.RUnlock()
+			if sameConnection {
+
+				v.log(LogError, "voice endpoint %s websocket closed unexpectedly, %s", v.endpoint, err)
+
+				// Start reconnect goroutine then exit.
+				go v.reconnect()
+			}
+			return
+		}
+
+		// Pass received message to voice event handler
+		select {
+		case <-close:
+			return
+		default:
+			go v.onEvent(message)
+		}
+	}
+}
+
+// onEvent handles any voice websocket events. This is only called by the
+// wsListen() function.
+func (v *VoiceConnection) onEvent(message []byte) {
+
+	v.log(LogDebug, "received: %s", string(message))
+
+	var e Event
+	if err := json.Unmarshal(message, &e); err != nil {
+		v.log(LogError, "unmarshal error, %s", err)
+		return
+	}
+
+	switch e.Operation {
+
+	case 2: // READY
+
+		if err := json.Unmarshal(e.RawData, &v.op2); err != nil {
+			v.log(LogError, "OP2 unmarshal error, %s, %s", err, string(e.RawData))
+			return
+		}
+
+		// Start the voice websocket heartbeat to keep the connection alive
+		go v.wsHeartbeat(v.wsConn, v.close, v.op2.HeartbeatInterval)
+		// TODO monitor a chan/bool to verify this was successful
+
+		// Start the UDP connection
+		err := v.udpOpen()
+		if err != nil {
+			v.log(LogError, "error opening udp connection, %s", err)
+			return
+		}
+
+		// Start the opusSender.
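+		// (48000 Hz with 960-sample frames corresponds to the 20ms opus
+		// frames that Discord voice expects)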
+ // TODO: Should we allow 48000/960 values to be user defined? + if v.OpusSend == nil { + v.OpusSend = make(chan []byte, 2) + } + go v.opusSender(v.udpConn, v.close, v.OpusSend, 48000, 960) + + // Start the opusReceiver + if !v.deaf { + if v.OpusRecv == nil { + v.OpusRecv = make(chan *Packet, 2) + } + + go v.opusReceiver(v.udpConn, v.close, v.OpusRecv) + } + + return + + case 3: // HEARTBEAT response + // add code to use this to track latency? + return + + case 4: // udp encryption secret key + v.Lock() + defer v.Unlock() + + v.op4 = voiceOP4{} + if err := json.Unmarshal(e.RawData, &v.op4); err != nil { + v.log(LogError, "OP4 unmarshall error, %s, %s", err, string(e.RawData)) + return + } + return + + case 5: + if len(v.voiceSpeakingUpdateHandlers) == 0 { + return + } + + voiceSpeakingUpdate := &VoiceSpeakingUpdate{} + if err := json.Unmarshal(e.RawData, voiceSpeakingUpdate); err != nil { + v.log(LogError, "OP5 unmarshall error, %s, %s", err, string(e.RawData)) + return + } + + for _, h := range v.voiceSpeakingUpdateHandlers { + h(v, voiceSpeakingUpdate) + } + + default: + v.log(LogDebug, "unknown voice operation, %d, %s", e.Operation, string(e.RawData)) + } + + return +} + +type voiceHeartbeatOp struct { + Op int `json:"op"` // Always 3 + Data int `json:"d"` +} + +// NOTE :: When a guild voice server changes how do we shut this down +// properly, so a new connection can be setup without fuss? +// +// wsHeartbeat sends regular heartbeats to voice Discord so it knows the client +// is still connected. If you do not send these heartbeats Discord will +// disconnect the websocket connection after a few seconds. +func (v *VoiceConnection) wsHeartbeat(wsConn *websocket.Conn, close <-chan struct{}, i time.Duration) { + + if close == nil || wsConn == nil { + return + } + + var err error + ticker := time.NewTicker(i * time.Millisecond) + defer ticker.Stop() + for { + v.log(LogDebug, "sending heartbeat packet") + v.wsMutex.Lock() + err = wsConn.WriteJSON(voiceHeartbeatOp{3, int(time.Now().Unix())}) + v.wsMutex.Unlock() + if err != nil { + v.log(LogError, "error sending heartbeat to voice endpoint %s, %s", v.endpoint, err) + return + } + + select { + case <-ticker.C: + // continue loop and send heartbeat + case <-close: + return + } + } +} + +// ------------------------------------------------------------------------------------------------ +// Code related to the VoiceConnection UDP connection +// ------------------------------------------------------------------------------------------------ + +type voiceUDPData struct { + Address string `json:"address"` // Public IP of machine running this code + Port uint16 `json:"port"` // UDP Port of machine running this code + Mode string `json:"mode"` // always "xsalsa20_poly1305" +} + +type voiceUDPD struct { + Protocol string `json:"protocol"` // Always "udp" ? + Data voiceUDPData `json:"data"` +} + +type voiceUDPOp struct { + Op int `json:"op"` // Always 1 + Data voiceUDPD `json:"d"` +} + +// udpOpen opens a UDP connection to the voice server and completes the +// initial required handshake. This connection is left open in the session +// and can be used to send or receive audio. 
This should only be called +// from voice.wsEvent OP2 +func (v *VoiceConnection) udpOpen() (err error) { + + v.Lock() + defer v.Unlock() + + if v.wsConn == nil { + return fmt.Errorf("nil voice websocket") + } + + if v.udpConn != nil { + return fmt.Errorf("udp connection already open") + } + + if v.close == nil { + return fmt.Errorf("nil close channel") + } + + if v.endpoint == "" { + return fmt.Errorf("empty endpoint") + } + + host := fmt.Sprintf("%s:%d", strings.TrimSuffix(v.endpoint, ":80"), v.op2.Port) + addr, err := net.ResolveUDPAddr("udp", host) + if err != nil { + v.log(LogWarning, "error resolving udp host %s, %s", host, err) + return + } + + v.log(LogInformational, "connecting to udp addr %s", addr.String()) + v.udpConn, err = net.DialUDP("udp", nil, addr) + if err != nil { + v.log(LogWarning, "error connecting to udp addr %s, %s", addr.String(), err) + return + } + + // Create a 70 byte array and put the SSRC code from the Op 2 VoiceConnection event + // into it. Then send that over the UDP connection to Discord + sb := make([]byte, 70) + binary.BigEndian.PutUint32(sb, v.op2.SSRC) + _, err = v.udpConn.Write(sb) + if err != nil { + v.log(LogWarning, "udp write error to %s, %s", addr.String(), err) + return + } + + // Create a 70 byte array and listen for the initial handshake response + // from Discord. Once we get it parse the IP and PORT information out + // of the response. This should be our public IP and PORT as Discord + // saw us. + rb := make([]byte, 70) + rlen, _, err := v.udpConn.ReadFromUDP(rb) + if err != nil { + v.log(LogWarning, "udp read error, %s, %s", addr.String(), err) + return + } + + if rlen < 70 { + v.log(LogWarning, "received udp packet too small") + return fmt.Errorf("received udp packet too small") + } + + // Loop over position 4 through 20 to grab the IP address + // Should never be beyond position 20. + var ip string + for i := 4; i < 20; i++ { + if rb[i] == 0 { + break + } + ip += string(rb[i]) + } + + // Grab port from position 68 and 69 + port := binary.LittleEndian.Uint16(rb[68:70]) + + // Take the data from above and send it back to Discord to finalize + // the UDP connection handshake. + data := voiceUDPOp{1, voiceUDPD{"udp", voiceUDPData{ip, port, "xsalsa20_poly1305"}}} + + v.wsMutex.Lock() + err = v.wsConn.WriteJSON(data) + v.wsMutex.Unlock() + if err != nil { + v.log(LogWarning, "udp write error, %#v, %s", data, err) + return + } + + // start udpKeepAlive + go v.udpKeepAlive(v.udpConn, v.close, 5*time.Second) + // TODO: find a way to check that it fired off okay + + return +} + +// udpKeepAlive sends a udp packet to keep the udp connection open +// This is still a bit of a "proof of concept" +func (v *VoiceConnection) udpKeepAlive(udpConn *net.UDPConn, close <-chan struct{}, i time.Duration) { + + if udpConn == nil || close == nil { + return + } + + var err error + var sequence uint64 + + packet := make([]byte, 8) + + ticker := time.NewTicker(i) + defer ticker.Stop() + for { + + binary.LittleEndian.PutUint64(packet, sequence) + sequence++ + + _, err = udpConn.Write(packet) + if err != nil { + v.log(LogError, "write error, %s", err) + return + } + + select { + case <-ticker.C: + // continue loop and send keepalive + case <-close: + return + } + } +} + +// opusSender will listen on the given channel and send any +// pre-encoded opus audio to Discord. Supposedly. 
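+//
+// Callers normally do not invoke this directly; they feed pre-encoded opus
+// frames to v.OpusSend and this loop paces them out, e.g. (sketch, where
+// frame holds one encoded opus packet):
+//
+//	v.OpusSend <- frame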
+func (v *VoiceConnection) opusSender(udpConn *net.UDPConn, close <-chan struct{}, opus <-chan []byte, rate, size int) { + + if udpConn == nil || close == nil { + return + } + + // VoiceConnection is now ready to receive audio packets + // TODO: this needs reviewed as I think there must be a better way. + v.Lock() + v.Ready = true + v.Unlock() + defer func() { + v.Lock() + v.Ready = false + v.Unlock() + }() + + var sequence uint16 + var timestamp uint32 + var recvbuf []byte + var ok bool + udpHeader := make([]byte, 12) + var nonce [24]byte + + // build the parts that don't change in the udpHeader + udpHeader[0] = 0x80 + udpHeader[1] = 0x78 + binary.BigEndian.PutUint32(udpHeader[8:], v.op2.SSRC) + + // start a send loop that loops until buf chan is closed + ticker := time.NewTicker(time.Millisecond * time.Duration(size/(rate/1000))) + defer ticker.Stop() + for { + + // Get data from chan. If chan is closed, return. + select { + case <-close: + return + case recvbuf, ok = <-opus: + if !ok { + return + } + // else, continue loop + } + + v.RLock() + speaking := v.speaking + v.RUnlock() + if !speaking { + err := v.Speaking(true) + if err != nil { + v.log(LogError, "error sending speaking packet, %s", err) + } + } + + // Add sequence and timestamp to udpPacket + binary.BigEndian.PutUint16(udpHeader[2:], sequence) + binary.BigEndian.PutUint32(udpHeader[4:], timestamp) + + // encrypt the opus data + copy(nonce[:], udpHeader) + v.RLock() + sendbuf := secretbox.Seal(udpHeader, recvbuf, &nonce, &v.op4.SecretKey) + v.RUnlock() + + // block here until we're exactly at the right time :) + // Then send rtp audio packet to Discord over UDP + select { + case <-close: + return + case <-ticker.C: + // continue + } + _, err := udpConn.Write(sendbuf) + + if err != nil { + v.log(LogError, "udp write error, %s", err) + v.log(LogDebug, "voice struct: %#v\n", v) + return + } + + if (sequence) == 0xFFFF { + sequence = 0 + } else { + sequence++ + } + + if (timestamp + uint32(size)) >= 0xFFFFFFFF { + timestamp = 0 + } else { + timestamp += uint32(size) + } + } +} + +// A Packet contains the headers and content of a received voice packet. +type Packet struct { + SSRC uint32 + Sequence uint16 + Timestamp uint32 + Type []byte + Opus []byte + PCM []int16 +} + +// opusReceiver listens on the UDP socket for incoming packets +// and sends them across the given channel +// NOTE :: This function may change names later. +func (v *VoiceConnection) opusReceiver(udpConn *net.UDPConn, close <-chan struct{}, c chan *Packet) { + + if udpConn == nil || close == nil { + return + } + + recvbuf := make([]byte, 1024) + var nonce [24]byte + + for { + rlen, err := udpConn.Read(recvbuf) + if err != nil { + // Detect if we have been closed manually. If a Close() has already + // happened, the udp connection we are listening on will be different + // to the current session. + v.RLock() + sameConnection := v.udpConn == udpConn + v.RUnlock() + if sameConnection { + + v.log(LogError, "udp read error, %s, %s", v.endpoint, err) + v.log(LogDebug, "voice struct: %#v\n", v) + + go v.reconnect() + } + return + } + + select { + case <-close: + return + default: + // continue loop + } + + // For now, skip anything except audio. 
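+		// The RTP-style packet parsed below is laid out as (big endian):
+		//
+		//	[0:2]   type bytes (0x80, 0x78 as built by opusSender)
+		//	[2:4]   sequence
+		//	[4:8]   timestamp
+		//	[8:12]  SSRC
+		//	[12:]   secretbox-encrypted opus; nonce = header bytes [0:12]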
+		if rlen < 12 || recvbuf[0] != 0x80 {
+			continue
+		}
+
+		// build an audio packet struct
+		p := Packet{}
+		p.Type = recvbuf[0:2]
+		p.Sequence = binary.BigEndian.Uint16(recvbuf[2:4])
+		p.Timestamp = binary.BigEndian.Uint32(recvbuf[4:8])
+		p.SSRC = binary.BigEndian.Uint32(recvbuf[8:12])
+		// decrypt opus data
+		copy(nonce[:], recvbuf[0:12])
+		p.Opus, _ = secretbox.Open(nil, recvbuf[12:rlen], &nonce, &v.op4.SecretKey)
+
+		if c != nil {
+			c <- &p
+		}
+	}
+}
+
+// Reconnect will close down a voice connection then immediately try to
+// reconnect to that session.
+// NOTE: This func is messy and a WIP while I find what works.
+// It will be cleaned up once a proven stable option is fleshed out.
+// aka: this is ugly shit code, please don't judge too harshly.
+func (v *VoiceConnection) reconnect() {
+
+	v.log(LogInformational, "called")
+
+	v.Lock()
+	if v.reconnecting {
+		v.log(LogInformational, "already reconnecting to channel %s, exiting", v.ChannelID)
+		v.Unlock()
+		return
+	}
+	v.reconnecting = true
+	v.Unlock()
+
+	defer func() { v.reconnecting = false }()
+
+	// Close any currently open connections
+	v.Close()
+
+	wait := time.Duration(1)
+	for {
+
+		<-time.After(wait * time.Second)
+		wait *= 2
+		if wait > 600 {
+			wait = 600
+		}
+
+		if v.session.DataReady == false || v.session.wsConn == nil {
+			v.log(LogInformational, "cannot reconnect to channel %s with unready session", v.ChannelID)
+			continue
+		}
+
+		v.log(LogInformational, "trying to reconnect to channel %s", v.ChannelID)
+
+		_, err := v.session.ChannelVoiceJoin(v.GuildID, v.ChannelID, v.mute, v.deaf)
+		if err == nil {
+			v.log(LogInformational, "successfully reconnected to channel %s", v.ChannelID)
+			return
+		}
+
+		v.log(LogInformational, "error reconnecting to channel %s, %s", v.ChannelID, err)
+
+		// if the reconnect above didn't work, let's just send a disconnect
+		// packet to reset things.
+		// Send an OP4 with a nil channel to disconnect
+		data := voiceChannelJoinOp{4, voiceChannelJoinData{&v.GuildID, nil, true, true}}
+		v.session.wsMutex.Lock()
+		err = v.session.wsConn.WriteJSON(data)
+		v.session.wsMutex.Unlock()
+		if err != nil {
+			v.log(LogError, "error sending disconnect packet, %s", err)
+		}
+
+	}
+}
diff --git a/vendor/github.com/bwmarrin/discordgo/wsapi.go b/vendor/github.com/bwmarrin/discordgo/wsapi.go
new file mode 100644
index 0000000..0912850
--- /dev/null
+++ b/vendor/github.com/bwmarrin/discordgo/wsapi.go
@@ -0,0 +1,750 @@
+// Discordgo - Discord bindings for Go
+// Available at https://github.com/bwmarrin/discordgo
+
+// Copyright 2015-2016 Bruce Marriner <bruce@sqls.net>. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains low level functions for interacting with the Discord
+// data websocket interface.
+
+package discordgo
+
+import (
+	"bytes"
+	"compress/zlib"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"runtime"
+	"sync/atomic"
+	"time"
+
+	"github.com/gorilla/websocket"
+)
+
+// ErrWSAlreadyOpen is thrown when you attempt to open
+// a websocket that is already open.
+var ErrWSAlreadyOpen = errors.New("web socket already opened")
+
+// ErrWSNotFound is thrown when you attempt to use a websocket
+// that doesn't exist
+var ErrWSNotFound = errors.New("no websocket connection exists")
+
+// ErrWSShardBounds is thrown when you try to use a shard ID that is
+// not less than the total shard count
+var ErrWSShardBounds = errors.New("ShardID must be less than ShardCount")
+
+type resumePacket struct {
+	Op   int `json:"op"`
+	Data struct {
+		Token     string `json:"token"`
+		SessionID string `json:"session_id"`
+		Sequence  int64  `json:"seq"`
+	} `json:"d"`
+}
+
+// Open opens a websocket connection to Discord.
+func (s *Session) Open() (err error) {
+
+	s.log(LogInformational, "called")
+
+	s.Lock()
+	defer func() {
+		if err != nil {
+			s.Unlock()
+		}
+	}()
+
+	// A basic state is a hard requirement for Voice.
+	if s.State == nil {
+		state := NewState()
+		state.TrackChannels = false
+		state.TrackEmojis = false
+		state.TrackMembers = false
+		state.TrackRoles = false
+		state.TrackVoice = false
+		s.State = state
+	}
+
+	if s.wsConn != nil {
+		err = ErrWSAlreadyOpen
+		return
+	}
+
+	if s.VoiceConnections == nil {
+		s.log(LogInformational, "creating new VoiceConnections map")
+		s.VoiceConnections = make(map[string]*VoiceConnection)
+	}
+
+	// Get the gateway to use for the Websocket connection
+	if s.gateway == "" {
+		s.gateway, err = s.Gateway()
+		if err != nil {
+			return
+		}
+
+		// Add the version and encoding to the URL
+		s.gateway = fmt.Sprintf("%s?v=5&encoding=json", s.gateway)
+	}
+
+	header := http.Header{}
+	header.Add("accept-encoding", "zlib")
+
+	s.log(LogInformational, "connecting to gateway %s", s.gateway)
+	s.wsConn, _, err = websocket.DefaultDialer.Dial(s.gateway, header)
+	if err != nil {
+		s.log(LogWarning, "error connecting to gateway %s, %s", s.gateway, err)
+		s.gateway = "" // clear cached gateway
+		// TODO: should we add a retry block here?
+		return
+	}
+
+	sequence := atomic.LoadInt64(s.sequence)
+	if s.sessionID != "" && sequence > 0 {
+
+		p := resumePacket{}
+		p.Op = 6
+		p.Data.Token = s.Token
+		p.Data.SessionID = s.sessionID
+		p.Data.Sequence = sequence
+
+		s.log(LogInformational, "sending resume packet to gateway")
+		err = s.wsConn.WriteJSON(p)
+		if err != nil {
+			s.log(LogWarning, "error sending gateway resume packet, %s, %s", s.gateway, err)
+			return
+		}
+
+	} else {
+
+		err = s.identify()
+		if err != nil {
+			s.log(LogWarning, "error sending gateway identify packet, %s, %s", s.gateway, err)
+			return
+		}
+	}
+
+	// Create listening outside of listen, as it needs to happen inside the
+	// mutex lock.
+	s.listening = make(chan interface{})
+	go s.listen(s.wsConn, s.listening)
+
+	s.Unlock()
+
+	s.log(LogInformational, "emit connect event")
+	s.handleEvent(connectEventType, &Connect{})
+
+	s.log(LogInformational, "exiting")
+	return
+}
+
+// listen polls the websocket connection for events; it will stop when the
+// listening channel is closed, or an error occurs.
+func (s *Session) listen(wsConn *websocket.Conn, listening <-chan interface{}) {
+
+	s.log(LogInformational, "called")
+
+	for {
+
+		messageType, message, err := wsConn.ReadMessage()
+
+		if err != nil {
+
+			// Detect if we have been closed manually. If a Close() has already
+			// happened, the websocket we are listening on will be different to
+			// the current session.
+ s.RLock() + sameConnection := s.wsConn == wsConn + s.RUnlock() + + if sameConnection { + + s.log(LogWarning, "error reading from gateway %s websocket, %s", s.gateway, err) + // There has been an error reading, close the websocket so that + // OnDisconnect event is emitted. + err := s.Close() + if err != nil { + s.log(LogWarning, "error closing session connection, %s", err) + } + + s.log(LogInformational, "calling reconnect() now") + s.reconnect() + } + + return + } + + select { + + case <-listening: + return + + default: + s.onEvent(messageType, message) + + } + } +} + +type heartbeatOp struct { + Op int `json:"op"` + Data int64 `json:"d"` +} + +type helloOp struct { + HeartbeatInterval time.Duration `json:"heartbeat_interval"` + Trace []string `json:"_trace"` +} + +// heartbeat sends regular heartbeats to Discord so it knows the client +// is still connected. If you do not send these heartbeats Discord will +// disconnect the websocket connection after a few seconds. +func (s *Session) heartbeat(wsConn *websocket.Conn, listening <-chan interface{}, i time.Duration) { + + s.log(LogInformational, "called") + + if listening == nil || wsConn == nil { + return + } + + var err error + ticker := time.NewTicker(i * time.Millisecond) + defer ticker.Stop() + + for { + sequence := atomic.LoadInt64(s.sequence) + s.log(LogInformational, "sending gateway websocket heartbeat seq %d", sequence) + s.wsMutex.Lock() + err = wsConn.WriteJSON(heartbeatOp{1, sequence}) + s.wsMutex.Unlock() + if err != nil { + s.log(LogError, "error sending heartbeat to gateway %s, %s", s.gateway, err) + s.Lock() + s.DataReady = false + s.Unlock() + return + } + s.Lock() + s.DataReady = true + s.Unlock() + + select { + case <-ticker.C: + // continue loop and send heartbeat + case <-listening: + return + } + } +} + +type updateStatusData struct { + IdleSince *int `json:"idle_since"` + Game *Game `json:"game"` +} + +type updateStatusOp struct { + Op int `json:"op"` + Data updateStatusData `json:"d"` +} + +// UpdateStreamingStatus is used to update the user's streaming status. +// If idle>0 then set status to idle. +// If game!="" then set game. +// If game!="" and url!="" then set the status type to streaming with the URL set. +// if otherwise, set status to active, and no game. +func (s *Session) UpdateStreamingStatus(idle int, game string, url string) (err error) { + + s.log(LogInformational, "called") + + s.RLock() + defer s.RUnlock() + if s.wsConn == nil { + return ErrWSNotFound + } + + var usd updateStatusData + if idle > 0 { + usd.IdleSince = &idle + } + + if game != "" { + gameType := 0 + if url != "" { + gameType = 1 + } + usd.Game = &Game{ + Name: game, + Type: gameType, + URL: url, + } + } + + s.wsMutex.Lock() + err = s.wsConn.WriteJSON(updateStatusOp{3, usd}) + s.wsMutex.Unlock() + + return +} + +// UpdateStatus is used to update the user's status. +// If idle>0 then set status to idle. +// If game!="" then set game. +// if otherwise, set status to active, and no game. 
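+//
+// For example (illustrative calls only):
+//
+//	_ = s.UpdateStatus(0, "a game") // playing "a game", not idle
+//	_ = s.UpdateStatus(0, "")       // active, no game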
+func (s *Session) UpdateStatus(idle int, game string) (err error) { + return s.UpdateStreamingStatus(idle, game, "") +} + +type requestGuildMembersData struct { + GuildID string `json:"guild_id"` + Query string `json:"query"` + Limit int `json:"limit"` +} + +type requestGuildMembersOp struct { + Op int `json:"op"` + Data requestGuildMembersData `json:"d"` +} + +// RequestGuildMembers requests guild members from the gateway +// The gateway responds with GuildMembersChunk events +// guildID : The ID of the guild to request members of +// query : String that username starts with, leave empty to return all members +// limit : Max number of items to return, or 0 to request all members matched +func (s *Session) RequestGuildMembers(guildID, query string, limit int) (err error) { + s.log(LogInformational, "called") + + s.RLock() + defer s.RUnlock() + if s.wsConn == nil { + return ErrWSNotFound + } + + data := requestGuildMembersData{ + GuildID: guildID, + Query: query, + Limit: limit, + } + + s.wsMutex.Lock() + err = s.wsConn.WriteJSON(requestGuildMembersOp{8, data}) + s.wsMutex.Unlock() + + return +} + +// onEvent is the "event handler" for all messages received on the +// Discord Gateway API websocket connection. +// +// If you use the AddHandler() function to register a handler for a +// specific event this function will pass the event along to that handler. +// +// If you use the AddHandler() function to register a handler for the +// "OnEvent" event then all events will be passed to that handler. +// +// TODO: You may also register a custom event handler entirely using... +func (s *Session) onEvent(messageType int, message []byte) { + + var err error + var reader io.Reader + reader = bytes.NewBuffer(message) + + // If this is a compressed message, uncompress it. + if messageType == websocket.BinaryMessage { + + z, err2 := zlib.NewReader(reader) + if err2 != nil { + s.log(LogError, "error uncompressing websocket message, %s", err) + return + } + + defer func() { + err3 := z.Close() + if err3 != nil { + s.log(LogWarning, "error closing zlib, %s", err) + } + }() + + reader = z + } + + // Decode the event into an Event struct. + var e *Event + decoder := json.NewDecoder(reader) + if err = decoder.Decode(&e); err != nil { + s.log(LogError, "error decoding websocket message, %s", err) + return + } + + s.log(LogDebug, "Op: %d, Seq: %d, Type: %s, Data: %s\n\n", e.Operation, e.Sequence, e.Type, string(e.RawData)) + + // Ping request. + // Must respond with a heartbeat packet within 5 seconds + if e.Operation == 1 { + s.log(LogInformational, "sending heartbeat in response to Op1") + s.wsMutex.Lock() + err = s.wsConn.WriteJSON(heartbeatOp{1, atomic.LoadInt64(s.sequence)}) + s.wsMutex.Unlock() + if err != nil { + s.log(LogError, "error sending heartbeat in response to Op1") + return + } + + return + } + + // Reconnect + // Must immediately disconnect from gateway and reconnect to new gateway. + if e.Operation == 7 { + // TODO + } + + // Invalid Session + // Must respond with a Identify packet. 
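+	// (For orientation: the gateway opcodes this function handles are
+	// 0 dispatch, 1 heartbeat request, 7 reconnect, 9 invalid session,
+	// and 10 hello.)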
+ if e.Operation == 9 { + + s.log(LogInformational, "sending identify packet to gateway in response to Op9") + + err = s.identify() + if err != nil { + s.log(LogWarning, "error sending gateway identify packet, %s, %s", s.gateway, err) + return + } + + return + } + + if e.Operation == 10 { + var h helloOp + if err = json.Unmarshal(e.RawData, &h); err != nil { + s.log(LogError, "error unmarshalling helloOp, %s", err) + } else { + go s.heartbeat(s.wsConn, s.listening, h.HeartbeatInterval) + } + return + } + + // Do not try to Dispatch a non-Dispatch Message + if e.Operation != 0 { + // But we probably should be doing something with them. + // TEMP + s.log(LogWarning, "unknown Op: %d, Seq: %d, Type: %s, Data: %s, message: %s", e.Operation, e.Sequence, e.Type, string(e.RawData), string(message)) + return + } + + // Store the message sequence + atomic.StoreInt64(s.sequence, e.Sequence) + + // Map event to registered event handlers and pass it along to any registered handlers. + if eh, ok := registeredInterfaceProviders[e.Type]; ok { + e.Struct = eh.New() + + // Attempt to unmarshal our event. + if err = json.Unmarshal(e.RawData, e.Struct); err != nil { + s.log(LogError, "error unmarshalling %s event, %s", e.Type, err) + } + + // Send event to any registered event handlers for it's type. + // Because the above doesn't cancel this, in case of an error + // the struct could be partially populated or at default values. + // However, most errors are due to a single field and I feel + // it's better to pass along what we received than nothing at all. + // TODO: Think about that decision :) + // Either way, READY events must fire, even with errors. + s.handleEvent(e.Type, e.Struct) + } else { + s.log(LogWarning, "unknown event: Op: %d, Seq: %d, Type: %s, Data: %s", e.Operation, e.Sequence, e.Type, string(e.RawData)) + } + + // For legacy reasons, we send the raw event also, this could be useful for handling unknown events. + s.handleEvent(eventEventType, e) +} + +// ------------------------------------------------------------------------------------------------ +// Code related to voice connections that initiate over the data websocket +// ------------------------------------------------------------------------------------------------ + +type voiceChannelJoinData struct { + GuildID *string `json:"guild_id"` + ChannelID *string `json:"channel_id"` + SelfMute bool `json:"self_mute"` + SelfDeaf bool `json:"self_deaf"` +} + +type voiceChannelJoinOp struct { + Op int `json:"op"` + Data voiceChannelJoinData `json:"d"` +} + +// ChannelVoiceJoin joins the session user to a voice channel. +// +// gID : Guild ID of the channel to join. +// cID : Channel ID of the channel to join. +// mute : If true, you will be set to muted upon joining. +// deaf : If true, you will be set to deafened upon joining. 
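+// A typical call looks like (a sketch; the IDs are placeholders and error
+// handling is elided):
+//
+//	dgv, err := s.ChannelVoiceJoin(guildID, channelID, false, false)
+//	if err == nil {
+//		dgv.OpusSend <- opusFrame // stream audio once connected
+//	}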
+func (s *Session) ChannelVoiceJoin(gID, cID string, mute, deaf bool) (voice *VoiceConnection, err error) { + + s.log(LogInformational, "called") + + s.RLock() + voice, _ = s.VoiceConnections[gID] + s.RUnlock() + + if voice == nil { + voice = &VoiceConnection{} + s.Lock() + s.VoiceConnections[gID] = voice + s.Unlock() + } + + voice.Lock() + voice.GuildID = gID + voice.ChannelID = cID + voice.deaf = deaf + voice.mute = mute + voice.session = s + voice.Unlock() + + // Send the request to Discord that we want to join the voice channel + data := voiceChannelJoinOp{4, voiceChannelJoinData{&gID, &cID, mute, deaf}} + s.wsMutex.Lock() + err = s.wsConn.WriteJSON(data) + s.wsMutex.Unlock() + if err != nil { + return + } + + // doesn't exactly work perfect yet.. TODO + err = voice.waitUntilConnected() + if err != nil { + s.log(LogWarning, "error waiting for voice to connect, %s", err) + voice.Close() + return + } + + return +} + +// onVoiceStateUpdate handles Voice State Update events on the data websocket. +func (s *Session) onVoiceStateUpdate(st *VoiceStateUpdate) { + + // If we don't have a connection for the channel, don't bother + if st.ChannelID == "" { + return + } + + // Check if we have a voice connection to update + s.RLock() + voice, exists := s.VoiceConnections[st.GuildID] + s.RUnlock() + if !exists { + return + } + + // We only care about events that are about us. + if s.State.User.ID != st.UserID { + return + } + + // Store the SessionID for later use. + voice.Lock() + voice.UserID = st.UserID + voice.sessionID = st.SessionID + voice.ChannelID = st.ChannelID + voice.Unlock() +} + +// onVoiceServerUpdate handles the Voice Server Update data websocket event. +// +// This is also fired if the Guild's voice region changes while connected +// to a voice channel. In that case, need to re-establish connection to +// the new region endpoint. +func (s *Session) onVoiceServerUpdate(st *VoiceServerUpdate) { + + s.log(LogInformational, "called") + + s.RLock() + voice, exists := s.VoiceConnections[st.GuildID] + s.RUnlock() + + // If no VoiceConnection exists, just skip this + if !exists { + return + } + + // If currently connected to voice ws/udp, then disconnect. + // Has no effect if not connected. 
+	voice.Close()
+
+	// Store values for later use
+	voice.Lock()
+	voice.token = st.Token
+	voice.endpoint = st.Endpoint
+	voice.GuildID = st.GuildID
+	voice.Unlock()
+
+	// Open a connection to the voice server
+	err := voice.open()
+	if err != nil {
+		s.log(LogError, "onVoiceServerUpdate voice.open, %s", err)
+	}
+}
+
+type identifyProperties struct {
+	OS              string `json:"$os"`
+	Browser         string `json:"$browser"`
+	Device          string `json:"$device"`
+	Referer         string `json:"$referer"`
+	ReferringDomain string `json:"$referring_domain"`
+}
+
+type identifyData struct {
+	Token          string             `json:"token"`
+	Properties     identifyProperties `json:"properties"`
+	LargeThreshold int                `json:"large_threshold"`
+	Compress       bool               `json:"compress"`
+	Shard          *[2]int            `json:"shard,omitempty"`
+}
+
+type identifyOp struct {
+	Op   int          `json:"op"`
+	Data identifyData `json:"d"`
+}
+
+// identify sends the identify packet to the gateway
+func (s *Session) identify() error {
+
+	properties := identifyProperties{runtime.GOOS,
+		"Discordgo v" + VERSION,
+		"",
+		"",
+		"",
+	}
+
+	data := identifyData{s.Token,
+		properties,
+		250,
+		s.Compress,
+		nil,
+	}
+
+	if s.ShardCount > 1 {
+
+		if s.ShardID >= s.ShardCount {
+			return ErrWSShardBounds
+		}
+
+		data.Shard = &[2]int{s.ShardID, s.ShardCount}
+	}
+
+	op := identifyOp{2, data}
+
+	s.wsMutex.Lock()
+	err := s.wsConn.WriteJSON(op)
+	s.wsMutex.Unlock()
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
+func (s *Session) reconnect() {
+
+	s.log(LogInformational, "called")
+
+	var err error
+
+	if s.ShouldReconnectOnError {
+
+		wait := time.Duration(1)
+
+		for {
+			s.log(LogInformational, "trying to reconnect to gateway")
+
+			err = s.Open()
+			if err == nil {
+				s.log(LogInformational, "successfully reconnected to gateway")
+
+				// I'm not sure if this is actually needed.
+				// If the gw reconnect works properly, voice should stay alive.
+				// However, there seems to be cases where something "weird"
+				// happens. So we're doing this for now just to improve
+				// stability in those edge cases.
+				s.RLock()
+				defer s.RUnlock()
+				for _, v := range s.VoiceConnections {
+
+					s.log(LogInformational, "reconnecting voice connection to guild %s", v.GuildID)
+					go v.reconnect()
+
+					// This is here just to prevent violently spamming the
+					// voice reconnects
+					time.Sleep(1 * time.Second)
+
+				}
+				return
+			}
+
+			s.log(LogError, "error reconnecting to gateway, %s", err)
+
+			<-time.After(wait * time.Second)
+			wait *= 2
+			if wait > 600 {
+				wait = 600
+			}
+		}
+	}
+}
+
+// Close closes a websocket and stops all listening/heartbeat goroutines.
+// TODO: Add support for Voice WS/UDP connections
+func (s *Session) Close() (err error) {
+
+	s.log(LogInformational, "called")
+	s.Lock()
+
+	s.DataReady = false
+
+	if s.listening != nil {
+		s.log(LogInformational, "closing listening channel")
+		close(s.listening)
+		s.listening = nil
+	}
+
+	// TODO: Close all active Voice Connections too
+	// this should force stop any reconnecting voice channels too
+
+	if s.wsConn != nil {
+
+		s.log(LogInformational, "sending close frame")
+		// To cleanly close a connection, a client should send a close
+		// frame and wait for the server to close the connection.
+		s.wsMutex.Lock()
+		err := s.wsConn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, ""))
+		s.wsMutex.Unlock()
+		if err != nil {
+			s.log(LogInformational, "error closing websocket, %s", err)
+		}
+
+		// TODO: Wait for Discord to actually close the connection.
+		time.Sleep(1 * time.Second)
+
+		s.log(LogInformational, "closing gateway websocket")
+		err = s.wsConn.Close()
+		if err != nil {
+			s.log(LogInformational, "error closing websocket, %s", err)
+		}
+
+		s.wsConn = nil
+	}
+
+	s.Unlock()
+
+	s.log(LogInformational, "emit disconnect event")
+	s.handleEvent(disconnectEventType, &Disconnect{})
+
+	return
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypass.go b/vendor/github.com/davecgh/go-spew/spew/bypass.go
new file mode 100644
index 0000000..7f166c3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypass.go
@@ -0,0 +1,152 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is not running on Google App Engine, compiled by GopherJS, and
+// "-tags safe" is not added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build !js,!appengine,!safe,!disableunsafe
+
+package spew
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = false
+
+	// ptrSize is the size of a pointer on the current arch.
+	ptrSize = unsafe.Sizeof((*byte)(nil))
+)
+
+var (
+	// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
+	// internal reflect.Value fields. These values are valid before golang
+	// commit ecccf07e7f9d which changed the format. They are also valid
+	// after commit 82f48826c6c7 which changed the format again to mirror
+	// the original format. Code in the init function updates these offsets
+	// as necessary.
+	offsetPtr    = ptrSize
+	offsetScalar = uintptr(0)
+	offsetFlag   = ptrSize * 2
+
+	// flagKindWidth and flagKindShift indicate various bits that the
+	// reflect package uses internally to track kind information.
+	//
+	// flagRO indicates whether or not the value field of a reflect.Value is
+	// read-only.
+	//
+	// flagIndir indicates whether the value field of a reflect.Value is
+	// the actual data or a pointer to the data.
+	//
+	// These values are valid before golang commit 90a7c3c86944 which
+	// changed their positions. Code in the init function updates these
+	// flags as necessary.
+	flagKindWidth = uintptr(5)
+	flagKindShift = flagKindWidth - 1
+	flagRO        = uintptr(1 << 0)
+	flagIndir     = uintptr(1 << 1)
+)
+
+func init() {
+	// Older versions of reflect.Value stored small integers directly in the
+	// ptr field (which is named val in the older versions). Versions
+	// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
+	// scalar for this purpose which unfortunately came before the flag
+	// field, so the offset of the flag field is different for those
+	// versions.
+	//
+	// This code constructs a new reflect.Value from a known small integer
+	// and checks if the size of the reflect.Value struct indicates it has
+	// the scalar field. When it does, the offsets are updated accordingly.
+	vv := reflect.ValueOf(0xf00)
+	if unsafe.Sizeof(vv) == (ptrSize * 4) {
+		offsetScalar = ptrSize * 2
+		offsetFlag = ptrSize * 3
+	}
+
+	// Commit 90a7c3c86944 changed the flag positions such that the low
+	// order bits are the kind. This code extracts the kind from the flags
+	// field and ensures it's the correct type. When it's not, the flag
+	// order has been changed to the newer format, so the flags are updated
+	// accordingly.
+	upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
+	upfv := *(*uintptr)(upf)
+	flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
+	if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
+		flagKindShift = 0
+		flagRO = 1 << 5
+		flagIndir = 1 << 6
+
+		// Commit adf9b30e5594 modified the flags to separate the
+		// flagRO flag into two bits which specifies whether or not the
+		// field is embedded. This causes flagIndir to move over a bit
+		// and means that flagRO is the combination of either of the
+		// original flagRO bit and the new bit.
+		//
+		// This code detects the change by extracting what used to be
+		// the indirect bit to ensure it's set. When it's not, the flag
+		// order has been changed to the newer format, so the flags are
+		// updated accordingly.
+		if upfv&flagIndir == 0 {
+			flagRO = 3 << 5
+			flagIndir = 1 << 7
+		}
+	}
+}
+
+// unsafeReflectValue converts the passed reflect.Value into one that bypasses
+// the typical safety restrictions preventing access to unaddressable and
+// unexported data. It works by digging the raw pointer to the underlying
+// value out of the protected value and generating a new unprotected (unsafe)
+// reflect.Value to it.
+//
+// This allows us to check for implementations of the Stringer and error
+// interfaces to be used for pretty printing ordinarily unaddressable and
+// inaccessible values such as unexported struct fields.
+func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
+	indirects := 1
+	vt := v.Type()
+	upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
+	rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
+	if rvf&flagIndir != 0 {
+		vt = reflect.PtrTo(v.Type())
+		indirects++
+	} else if offsetScalar != 0 {
+		// The value is in the scalar field when it's not one of the
+		// reference types.
+		switch vt.Kind() {
+		case reflect.Uintptr:
+		case reflect.Chan:
+		case reflect.Func:
+		case reflect.Map:
+		case reflect.Ptr:
+		case reflect.UnsafePointer:
+		default:
+			upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
+				offsetScalar)
+		}
+	}
+
+	pv := reflect.NewAt(vt, upv)
+	rv = pv
+	for i := 0; i < indirects; i++ {
+		rv = rv.Elem()
+	}
+	return rv
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
new file mode 100644
index 0000000..1fe3cf3
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/bypasssafe.go
@@ -0,0 +1,38 @@
+// Copyright (c) 2015-2016 Dave Collins <dave@davec.name>
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS.
+// IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when the code is running on Google App Engine, compiled by GopherJS, or
+// "-tags safe" is added to the go build command line. The "disableunsafe"
+// tag is deprecated and thus should not be used.
+// +build js appengine safe disableunsafe
+
+package spew
+
+import "reflect"
+
+const (
+	// UnsafeDisabled is a build-time constant which specifies whether or
+	// not access to the unsafe package is available.
+	UnsafeDisabled = true
+)
+
+// unsafeReflectValue typically converts the passed reflect.Value into one
+// that bypasses the typical safety restrictions preventing access to
+// unaddressable and unexported data. However, doing this relies on access to
+// the unsafe package. This is a stub version which simply returns the passed
+// reflect.Value when the unsafe package is not available.
+func unsafeReflectValue(v reflect.Value) reflect.Value {
+	return v
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/common.go b/vendor/github.com/davecgh/go-spew/spew/common.go
new file mode 100644
index 0000000..1be8ce9
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/common.go
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins <dave@davec.name>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+	"bytes"
+	"fmt"
+	"io"
+	"reflect"
+	"sort"
+	"strconv"
+)
+
+// Some constants in the form of bytes to avoid string overhead. This mirrors
+// the technique used in the fmt package.
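+// (Writing pre-built []byte values straight to an io.Writer avoids the
+// string-to-[]byte conversion, and the allocation it implies, on every
+// write call.)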
+var (
+	panicBytes            = []byte("(PANIC=")
+	plusBytes             = []byte("+")
+	iBytes                = []byte("i")
+	trueBytes             = []byte("true")
+	falseBytes            = []byte("false")
+	interfaceBytes        = []byte("(interface {})")
+	commaNewlineBytes     = []byte(",\n")
+	newlineBytes          = []byte("\n")
+	openBraceBytes        = []byte("{")
+	openBraceNewlineBytes = []byte("{\n")
+	closeBraceBytes       = []byte("}")
+	asteriskBytes         = []byte("*")
+	colonBytes            = []byte(":")
+	colonSpaceBytes       = []byte(": ")
+	openParenBytes        = []byte("(")
+	closeParenBytes       = []byte(")")
+	spaceBytes            = []byte(" ")
+	pointerChainBytes     = []byte("->")
+	nilAngleBytes         = []byte("<nil>")
+	maxNewlineBytes       = []byte("<max depth reached>\n")
+	maxShortBytes         = []byte("<max>")
+	circularBytes         = []byte("<already shown>")
+	circularShortBytes    = []byte("<shown>")
+	invalidAngleBytes     = []byte("<invalid>")
+	openBracketBytes      = []byte("[")
+	closeBracketBytes     = []byte("]")
+	percentBytes          = []byte("%")
+	precisionBytes        = []byte(".")
+	openAngleBytes        = []byte("<")
+	closeAngleBytes       = []byte(">")
+	openMapBytes          = []byte("map[")
+	closeMapBytes         = []byte("]")
+	lenEqualsBytes        = []byte("len=")
+	capEqualsBytes        = []byte("cap=")
+)
+
+// hexDigits is used to map a decimal value to a hex digit.
+var hexDigits = "0123456789abcdef"
+
+// catchPanic handles any panics that might occur during the handleMethods
+// calls.
+func catchPanic(w io.Writer, v reflect.Value) {
+	if err := recover(); err != nil {
+		w.Write(panicBytes)
+		fmt.Fprintf(w, "%v", err)
+		w.Write(closeParenBytes)
+	}
+}
+
+// handleMethods attempts to call the Error and String methods on the underlying
+// type the passed reflect.Value represents and outputs the result to Writer w.
+//
+// It handles panics in any called methods by catching and displaying the error
+// as the formatted value.
+func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
+	// We need an interface to check if the type implements the error or
+	// Stringer interface. However, the reflect package won't give us an
+	// interface on certain things like unexported struct fields in order
+	// to enforce visibility rules. We use unsafe, when it's available,
+	// to bypass these restrictions since this package does not mutate the
+	// values.
+	if !v.CanInterface() {
+		if UnsafeDisabled {
+			return false
+		}
+
+		v = unsafeReflectValue(v)
+	}
+
+	// Choose whether or not to do error and Stringer interface lookups against
+	// the base type or a pointer to the base type depending on settings.
+	// Technically calling one of these methods with a pointer receiver can
+	// mutate the value, however, types which choose to satisfy an error or
+	// Stringer interface with a pointer receiver should not be mutating their
+	// state inside these interface methods.
+	if !cs.DisablePointerMethods && !UnsafeDisabled && !v.CanAddr() {
+		v = unsafeReflectValue(v)
+	}
+	if v.CanAddr() {
+		v = v.Addr()
+	}
+
+	// Is it an error or Stringer?
+	switch iface := v.Interface().(type) {
+	case error:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.Error()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+
+		w.Write([]byte(iface.Error()))
+		return true
+
+	case fmt.Stringer:
+		defer catchPanic(w, v)
+		if cs.ContinueOnMethod {
+			w.Write(openParenBytes)
+			w.Write([]byte(iface.String()))
+			w.Write(closeParenBytes)
+			w.Write(spaceBytes)
+			return false
+		}
+		w.Write([]byte(iface.String()))
+		return true
+	}
+	return false
+}
+
+// printBool outputs a boolean value as true or false to Writer w.
+func printBool(w io.Writer, val bool) { + if val { + w.Write(trueBytes) + } else { + w.Write(falseBytes) + } +} + +// printInt outputs a signed integer value to Writer w. +func printInt(w io.Writer, val int64, base int) { + w.Write([]byte(strconv.FormatInt(val, base))) +} + +// printUint outputs an unsigned integer value to Writer w. +func printUint(w io.Writer, val uint64, base int) { + w.Write([]byte(strconv.FormatUint(val, base))) +} + +// printFloat outputs a floating point value using the specified precision, +// which is expected to be 32 or 64bit, to Writer w. +func printFloat(w io.Writer, val float64, precision int) { + w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision))) +} + +// printComplex outputs a complex value using the specified float precision +// for the real and imaginary parts to Writer w. +func printComplex(w io.Writer, c complex128, floatPrecision int) { + r := real(c) + w.Write(openParenBytes) + w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision))) + i := imag(c) + if i >= 0 { + w.Write(plusBytes) + } + w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision))) + w.Write(iBytes) + w.Write(closeParenBytes) +} + +// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x' +// prefix to Writer w. +func printHexPtr(w io.Writer, p uintptr) { + // Null pointer. + num := uint64(p) + if num == 0 { + w.Write(nilAngleBytes) + return + } + + // Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix + buf := make([]byte, 18) + + // It's simpler to construct the hex string right to left. + base := uint64(16) + i := len(buf) - 1 + for num >= base { + buf[i] = hexDigits[num%base] + num /= base + i-- + } + buf[i] = hexDigits[num] + + // Add '0x' prefix. + i-- + buf[i] = 'x' + i-- + buf[i] = '0' + + // Strip unused leading bytes. + buf = buf[i:] + w.Write(buf) +} + +// valuesSorter implements sort.Interface to allow a slice of reflect.Value +// elements to be sorted. +type valuesSorter struct { + values []reflect.Value + strings []string // either nil or same len and values + cs *ConfigState +} + +// newValuesSorter initializes a valuesSorter instance, which holds a set of +// surrogate keys on which the data should be sorted. It uses flags in +// ConfigState to decide if and how to populate those surrogate keys. +func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface { + vs := &valuesSorter{values: values, cs: cs} + if canSortSimply(vs.values[0].Kind()) { + return vs + } + if !cs.DisableMethods { + vs.strings = make([]string, len(values)) + for i := range vs.values { + b := bytes.Buffer{} + if !handleMethods(cs, &b, vs.values[i]) { + vs.strings = nil + break + } + vs.strings[i] = b.String() + } + } + if vs.strings == nil && cs.SpewKeys { + vs.strings = make([]string, len(values)) + for i := range vs.values { + vs.strings[i] = Sprintf("%#v", vs.values[i].Interface()) + } + } + return vs +} + +// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted +// directly, or whether it should be considered for sorting by surrogate keys +// (if the ConfigState allows it). +func canSortSimply(kind reflect.Kind) bool { + // This switch parallels valueSortLess, except for the default case. 
+ switch kind { + case reflect.Bool: + return true + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return true + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return true + case reflect.Float32, reflect.Float64: + return true + case reflect.String: + return true + case reflect.Uintptr: + return true + case reflect.Array: + return true + } + return false +} + +// Len returns the number of values in the slice. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Len() int { + return len(s.values) +} + +// Swap swaps the values at the passed indices. It is part of the +// sort.Interface implementation. +func (s *valuesSorter) Swap(i, j int) { + s.values[i], s.values[j] = s.values[j], s.values[i] + if s.strings != nil { + s.strings[i], s.strings[j] = s.strings[j], s.strings[i] + } +} + +// valueSortLess returns whether the first value should sort before the second +// value. It is used by valueSorter.Less as part of the sort.Interface +// implementation. +func valueSortLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Bool: + return !a.Bool() && b.Bool() + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + return a.Int() < b.Int() + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + return a.Uint() < b.Uint() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.String: + return a.String() < b.String() + case reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Array: + // Compare the contents of both arrays. + l := a.Len() + for i := 0; i < l; i++ { + av := a.Index(i) + bv := b.Index(i) + if av.Interface() == bv.Interface() { + continue + } + return valueSortLess(av, bv) + } + } + return a.String() < b.String() +} + +// Less returns whether the value at index i should sort before the +// value at index j. It is part of the sort.Interface implementation. +func (s *valuesSorter) Less(i, j int) bool { + if s.strings == nil { + return valueSortLess(s.values[i], s.values[j]) + } + return s.strings[i] < s.strings[j] +} + +// sortValues is a sort function that handles both native types and any type that +// can be converted to error or Stringer. Other inputs are sorted according to +// their Value.String() value to ensure display stability. +func sortValues(values []reflect.Value, cs *ConfigState) { + if len(values) == 0 { + return + } + sort.Sort(newValuesSorter(values, cs)) +} diff --git a/vendor/github.com/davecgh/go-spew/spew/config.go b/vendor/github.com/davecgh/go-spew/spew/config.go new file mode 100644 index 0000000..2e3d22f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/config.go @@ -0,0 +1,306 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +package spew + +import ( + "bytes" + "fmt" + "io" + "os" +) + +// ConfigState houses the configuration options used by spew to format and +// display values. There is a global instance, Config, that is used to control +// all top-level Formatter and Dump functionality. Each ConfigState instance +// provides methods equivalent to the top-level functions. +// +// The zero value for ConfigState provides no indentation. You would typically +// want to set it to a space or a tab. +// +// Alternatively, you can use NewDefaultConfig to get a ConfigState instance +// with default settings. See the documentation of NewDefaultConfig for default +// values. +type ConfigState struct { + // Indent specifies the string to use for each indentation level. The + // global config instance that all top-level functions use set this to a + // single space by default. If you would like more indentation, you might + // set this to a tab with "\t" or perhaps two spaces with " ". + Indent string + + // MaxDepth controls the maximum number of levels to descend into nested + // data structures. The default, 0, means there is no limit. + // + // NOTE: Circular data structures are properly detected, so it is not + // necessary to set this value unless you specifically want to limit deeply + // nested data structures. + MaxDepth int + + // DisableMethods specifies whether or not error and Stringer interfaces are + // invoked for types that implement them. + DisableMethods bool + + // DisablePointerMethods specifies whether or not to check for and invoke + // error and Stringer interfaces on types which only accept a pointer + // receiver when the current type is not a pointer. + // + // NOTE: This might be an unsafe action since calling one of these methods + // with a pointer receiver could technically mutate the value, however, + // in practice, types which choose to satisify an error or Stringer + // interface with a pointer receiver should not be mutating their state + // inside these interface methods. As a result, this option relies on + // access to the unsafe package, so it will not have any effect when + // running in environments without access to the unsafe package such as + // Google App Engine or with the "safe" build tag specified. + DisablePointerMethods bool + + // DisablePointerAddresses specifies whether to disable the printing of + // pointer addresses. This is useful when diffing data structures in tests. + DisablePointerAddresses bool + + // DisableCapacities specifies whether to disable the printing of capacities + // for arrays, slices, maps and channels. This is useful when diffing + // data structures in tests. + DisableCapacities bool + + // ContinueOnMethod specifies whether or not recursion should continue once + // a custom error or Stringer interface is invoked. The default, false, + // means it will print the results of invoking the custom error or Stringer + // interface and return immediately instead of continuing to recurse into + // the internals of the data type. + // + // NOTE: This flag does not have any effect if method invocation is disabled + // via the DisableMethods or DisablePointerMethods options. + ContinueOnMethod bool + + // SortKeys specifies map keys should be sorted before being printed. Use + // this to have a more deterministic, diffable output. 
Note that only + // native types (bool, int, uint, floats, uintptr and string) and types + // that support the error or Stringer interfaces (if methods are + // enabled) are supported, with other types sorted according to the + // reflect.Value.String() output which guarantees display stability. + SortKeys bool + + // SpewKeys specifies that, as a last resort attempt, map keys should + // be spewed to strings and sorted by those strings. This is only + // considered if SortKeys is true. + SpewKeys bool +} + +// Config is the active configuration of the top-level functions. +// The configuration can be changed by modifying the contents of spew.Config. +var Config = ConfigState{Indent: " "} + +// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the formatted string as a value that satisfies error. See NewFormatter +// for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) { + return fmt.Errorf(format, c.convertArgs(a)...) +} + +// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprint(w, c.convertArgs(a)...) +} + +// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) { + return fmt.Fprintf(w, format, c.convertArgs(a)...) +} + +// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it +// passed with a Formatter interface returned by c.NewFormatter. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + return fmt.Fprintln(w, c.convertArgs(a)...) +} + +// Print is a wrapper for fmt.Print that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Print(c.NewFormatter(a), c.NewFormatter(b)) +func (c *ConfigState) Print(a ...interface{}) (n int, err error) { + return fmt.Print(c.convertArgs(a)...) +} + +// Printf is a wrapper for fmt.Printf that treats each argument as if it were +// passed with a Formatter interface returned by c.NewFormatter. It returns +// the number of bytes written and any write error encountered. See +// NewFormatter for formatting details. 
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
+	return fmt.Printf(format, c.convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
+	return fmt.Println(c.convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprint(a ...interface{}) string {
+	return fmt.Sprint(c.convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a Formatter interface returned by c.NewFormatter. It returns
+// the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
+	return fmt.Sprintf(format, c.convertArgs(a)...)
+}
+
+// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
+// were passed with a Formatter interface returned by c.NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+//	fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
+func (c *ConfigState) Sprintln(a ...interface{}) string {
+	return fmt.Sprintln(c.convertArgs(a)...)
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+c.Printf, c.Println, or c.Print.
+*/
+func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
+	return newFormatter(c, v)
+}
+
+// Fdump formats and displays the passed arguments to io.Writer w. It formats
+// exactly the same as Dump.
+func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
+	fdump(c, w, a...)
+} + +/* +Dump displays the passed parameters to standard out with newlines, customizable +indentation, and additional debug information such as complete types and all +pointer addresses used to indirect to the final value. It provides the +following features over the built-in printing facilities provided by the fmt +package: + + * Pointers are dereferenced and followed + * Circular data structures are detected and handled properly + * Custom Stringer/error interfaces are optionally invoked, including + on unexported types + * Custom types which only implement the Stringer/error interfaces via + a pointer receiver are optionally invoked when passing non-pointer + variables + * Byte arrays and slices are dumped like the hexdump -C command which + includes offsets, byte values in hex, and ASCII output + +The configuration options are controlled by modifying the public members +of c. See ConfigState for options documentation. + +See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to +get the formatted result as a string. +*/ +func (c *ConfigState) Dump(a ...interface{}) { + fdump(c, os.Stdout, a...) +} + +// Sdump returns a string with the passed arguments formatted exactly the same +// as Dump. +func (c *ConfigState) Sdump(a ...interface{}) string { + var buf bytes.Buffer + fdump(c, &buf, a...) + return buf.String() +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a spew Formatter interface using +// the ConfigState associated with s. +func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = newFormatter(c, arg) + } + return formatters +} + +// NewDefaultConfig returns a ConfigState with the following default settings. +// +// Indent: " " +// MaxDepth: 0 +// DisableMethods: false +// DisablePointerMethods: false +// ContinueOnMethod: false +// SortKeys: false +func NewDefaultConfig() *ConfigState { + return &ConfigState{Indent: " "} +} diff --git a/vendor/github.com/davecgh/go-spew/spew/doc.go b/vendor/github.com/davecgh/go-spew/spew/doc.go new file mode 100644 index 0000000..aacaac6 --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/doc.go @@ -0,0 +1,211 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +/* +Package spew implements a deep pretty printer for Go data structures to aid in +debugging. 
+
+A quick overview of the additional features spew provides over the built-in
+printing facilities for Go data types is as follows:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output (only when using
+ Dump style)
+
+There are two different approaches spew allows for dumping Go data structures:
+
+ * Dump style which prints with newlines, customizable indentation,
+ and additional debug information such as types and all pointer addresses
+ used to indirect to the final value
+ * A custom Formatter interface that integrates cleanly with the standard fmt
+ package and replaces %v, %+v, %#v, and %#+v to provide inline printing
+ similar to the default %v while providing the additional functionality
+ outlined above and passing unsupported format verbs such as %x and %q
+ along to fmt
+
+Quick Start
+
+This section demonstrates how to quickly get started with spew. See the
+sections below for further details on formatting and configuration options.
+
+To dump a variable with full newlines, indentation, type, and pointer
+information, use Dump, Fdump, or Sdump:
+ spew.Dump(myVar1, myVar2, ...)
+ spew.Fdump(someWriter, myVar1, myVar2, ...)
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Alternatively, if you would prefer to use format strings with a compacted inline
+printing style, use the convenience wrappers Printf, Fprintf, etc. with
+%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
+%#+v (adds types and pointer addresses):
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+Configuration Options
+
+Configuration of spew is handled by fields in the ConfigState type. For
+convenience, all of the top-level functions use a global state available
+via the spew.Config global.
+
+It is also possible to create a ConfigState instance that provides methods
+equivalent to the top-level functions. This allows multiple configurations
+to be used concurrently. See the ConfigState documentation for more details.
+
+The following configuration options are available:
+ * Indent
+ String to use for each indentation level for Dump functions.
+ It is a single space by default. A popular alternative is "\t".
+
+ * MaxDepth
+ Maximum number of levels to descend into nested data structures.
+ There is no limit by default.
+
+ * DisableMethods
+ Disables invocation of error and Stringer interface methods.
+ Method invocation is enabled by default.
+
+ * DisablePointerMethods
+ Disables invocation of error and Stringer interface methods on types
+ which only accept pointer receivers from non-pointer variables.
+ Pointer method invocation is enabled by default.
+
+ * DisablePointerAddresses
+ DisablePointerAddresses specifies whether to disable the printing of
+ pointer addresses. This is useful when diffing data structures in tests.
+
+ * DisableCapacities
+ DisableCapacities specifies whether to disable the printing of
+ capacities for arrays, slices, maps and channels.
This is useful when
+ diffing data structures in tests.
+
+ * ContinueOnMethod
+ Enables recursion into types after invoking error and Stringer interface
+ methods. Recursion after method invocation is disabled by default.
+
+ * SortKeys
+ Specifies map keys should be sorted before being printed. Use
+ this to have a more deterministic, diffable output. Note that
+ only native types (bool, int, uint, floats, uintptr and string)
+ and types which implement error or Stringer interfaces are
+ supported, with other types sorted according to the
+ reflect.Value.String() output which guarantees display
+ stability. Natural map order is used by default.
+
+ * SpewKeys
+ Specifies that, as a last resort attempt, map keys should be
+ spewed to strings and sorted by those strings. This is only
+ considered if SortKeys is true.
+
+Dump Usage
+
+Simply call spew.Dump with a list of variables you want to dump:
+
+ spew.Dump(myVar1, myVar2, ...)
+
+You may also call spew.Fdump if you would prefer to output to an arbitrary
+io.Writer. For example, to dump to standard error:
+
+ spew.Fdump(os.Stderr, myVar1, myVar2, ...)
+
+A third option is to call spew.Sdump to get the formatted output as a string:
+
+ str := spew.Sdump(myVar1, myVar2, ...)
+
+Sample Dump Output
+
+See the Dump example for details on the setup of the types and variables being
+shown here.
+
+ (main.Foo) {
+ unexportedField: (*main.Bar)(0xf84002e210)({
+ flag: (main.Flag) flagTwo,
+ data: (uintptr) <nil>
+ }),
+ ExportedField: (map[interface {}]interface {}) (len=1) {
+ (string) (len=3) "one": (bool) true
+ }
+ }
+
+Byte (and uint8) arrays and slices are displayed uniquely, like the hexdump -C
+command, as shown:
+ ([]uint8) (len=32 cap=32) {
+ 00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
+ 00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
+ 00000020 31 32 |12|
+ }
+
+Custom Formatter
+
+Spew provides a custom formatter that implements the fmt.Formatter interface
+so that it integrates cleanly with standard fmt package printing functions. The
+formatter is useful for inline printing of smaller data types similar to the
+standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Custom Formatter Usage
+
+The simplest way to make use of the spew custom formatter is to call one of the
+convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
+functions have syntax you are most likely already familiar with:
+
+ spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+ spew.Println(myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
+ spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
+
+See the Index for the full list of convenience functions.
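+
+As an illustrative sketch of combining the custom formatter with the
+configuration options above, a dedicated ConfigState value exposes the same
+convenience methods without touching the shared spew.Config (cfg and myVar
+are hypothetical names, not part of the package):
+
+ cfg := spew.ConfigState{Indent: "\t", MaxDepth: 1}
+ cfg.Printf("myVar: %+v\n", myVar)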
+ +Sample Formatter Output + +Double pointer to a uint8: + %v: <**>5 + %+v: <**>(0xf8400420d0->0xf8400420c8)5 + %#v: (**uint8)5 + %#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5 + +Pointer to circular struct with a uint8 field and a pointer to itself: + %v: <*>{1 <*>} + %+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)} + %#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)} + %#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)} + +See the Printf example for details on the setup of variables being shown +here. + +Errors + +Since it is possible for custom Stringer/error interfaces to panic, spew +detects them and handles them internally by printing the panic information +inline with the output. Since spew is intended to provide deep pretty printing +capabilities on structures, it intentionally does not return any errors. +*/ +package spew diff --git a/vendor/github.com/davecgh/go-spew/spew/dump.go b/vendor/github.com/davecgh/go-spew/spew/dump.go new file mode 100644 index 0000000..f78d89f --- /dev/null +++ b/vendor/github.com/davecgh/go-spew/spew/dump.go @@ -0,0 +1,509 @@ +/* + * Copyright (c) 2013-2016 Dave Collins + * + * Permission to use, copy, modify, and distribute this software for any + * purpose with or without fee is hereby granted, provided that the above + * copyright notice and this permission notice appear in all copies. + * + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF + * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +package spew + +import ( + "bytes" + "encoding/hex" + "fmt" + "io" + "os" + "reflect" + "regexp" + "strconv" + "strings" +) + +var ( + // uint8Type is a reflect.Type representing a uint8. It is used to + // convert cgo types to uint8 slices for hexdumping. + uint8Type = reflect.TypeOf(uint8(0)) + + // cCharRE is a regular expression that matches a cgo char. + // It is used to detect character arrays to hexdump them. + cCharRE = regexp.MustCompile(`^.*\._Ctype_char$`) + + // cUnsignedCharRE is a regular expression that matches a cgo unsigned + // char. It is used to detect unsigned character arrays to hexdump + // them. + cUnsignedCharRE = regexp.MustCompile(`^.*\._Ctype_unsignedchar$`) + + // cUint8tCharRE is a regular expression that matches a cgo uint8_t. + // It is used to detect uint8_t arrays to hexdump them. + cUint8tCharRE = regexp.MustCompile(`^.*\._Ctype_uint8_t$`) +) + +// dumpState contains information about the state of a dump operation. +type dumpState struct { + w io.Writer + depth int + pointers map[uintptr]int + ignoreNextType bool + ignoreNextIndent bool + cs *ConfigState +} + +// indent performs indentation according to the depth level and cs.Indent +// option. +func (d *dumpState) indent() { + if d.ignoreNextIndent { + d.ignoreNextIndent = false + return + } + d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth)) +} + +// unpackValue returns values inside of non-nil interfaces when possible. +// This is useful for data types like structs, arrays, slices, and maps which +// can contain varying types packed inside an interface. 
+func (d *dumpState) unpackValue(v reflect.Value) reflect.Value { + if v.Kind() == reflect.Interface && !v.IsNil() { + v = v.Elem() + } + return v +} + +// dumpPtr handles formatting of pointers by indirecting them as necessary. +func (d *dumpState) dumpPtr(v reflect.Value) { + // Remove pointers at or below the current depth from map used to detect + // circular refs. + for k, depth := range d.pointers { + if depth >= d.depth { + delete(d.pointers, k) + } + } + + // Keep list of all dereferenced pointers to show later. + pointerChain := make([]uintptr, 0) + + // Figure out how many levels of indirection there are by dereferencing + // pointers and unpacking interfaces down the chain while detecting circular + // references. + nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := d.pointers[addr]; ok && pd < d.depth { + cycleFound = true + indirects-- + break + } + d.pointers[addr] = d.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type information. + d.w.Write(openParenBytes) + d.w.Write(bytes.Repeat(asteriskBytes, indirects)) + d.w.Write([]byte(ve.Type().String())) + d.w.Write(closeParenBytes) + + // Display pointer information. + if !d.cs.DisablePointerAddresses && len(pointerChain) > 0 { + d.w.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + d.w.Write(pointerChainBytes) + } + printHexPtr(d.w, addr) + } + d.w.Write(closeParenBytes) + } + + // Display dereferenced value. + d.w.Write(openParenBytes) + switch { + case nilFound: + d.w.Write(nilAngleBytes) + + case cycleFound: + d.w.Write(circularBytes) + + default: + d.ignoreNextType = true + d.dump(ve) + } + d.w.Write(closeParenBytes) +} + +// dumpSlice handles formatting of arrays and slices. Byte (uint8 under +// reflection) arrays and slices are dumped in hexdump -C fashion. +func (d *dumpState) dumpSlice(v reflect.Value) { + // Determine whether this type should be hex dumped or not. Also, + // for types which should be hexdumped, try to use the underlying data + // first, then fall back to trying to convert them to a uint8 slice. + var buf []uint8 + doConvert := false + doHexDump := false + numEntries := v.Len() + if numEntries > 0 { + vt := v.Index(0).Type() + vts := vt.String() + switch { + // C types that need to be converted. + case cCharRE.MatchString(vts): + fallthrough + case cUnsignedCharRE.MatchString(vts): + fallthrough + case cUint8tCharRE.MatchString(vts): + doConvert = true + + // Try to use existing uint8 slices and fall back to converting + // and copying if that fails. + case vt.Kind() == reflect.Uint8: + // We need an addressable interface to convert the type + // to a byte slice. However, the reflect package won't + // give us an interface on certain things like + // unexported struct fields in order to enforce + // visibility rules. We use unsafe, when available, to + // bypass these restrictions since this package does not + // mutate the values. + vs := v + if !vs.CanInterface() || !vs.CanAddr() { + vs = unsafeReflectValue(vs) + } + if !UnsafeDisabled { + vs = vs.Slice(0, numEntries) + + // Use the existing uint8 slice if it can be + // type asserted. 
+ iface := vs.Interface() + if slice, ok := iface.([]uint8); ok { + buf = slice + doHexDump = true + break + } + } + + // The underlying data needs to be converted if it can't + // be type asserted to a uint8 slice. + doConvert = true + } + + // Copy and convert the underlying type if needed. + if doConvert && vt.ConvertibleTo(uint8Type) { + // Convert and copy each element into a uint8 byte + // slice. + buf = make([]uint8, numEntries) + for i := 0; i < numEntries; i++ { + vv := v.Index(i) + buf[i] = uint8(vv.Convert(uint8Type).Uint()) + } + doHexDump = true + } + } + + // Hexdump the entire slice as needed. + if doHexDump { + indent := strings.Repeat(d.cs.Indent, d.depth) + str := indent + hex.Dump(buf) + str = strings.Replace(str, "\n", "\n"+indent, -1) + str = strings.TrimRight(str, d.cs.Indent) + d.w.Write([]byte(str)) + return + } + + // Recursively call dump for each item. + for i := 0; i < numEntries; i++ { + d.dump(d.unpackValue(v.Index(i))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } +} + +// dump is the main workhorse for dumping a value. It uses the passed reflect +// value to figure out what kind of object we are dealing with and formats it +// appropriately. It is a recursive function, however circular data structures +// are detected and handled properly. +func (d *dumpState) dump(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + d.w.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + d.indent() + d.dumpPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !d.ignoreNextType { + d.indent() + d.w.Write(openParenBytes) + d.w.Write([]byte(v.Type().String())) + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + d.ignoreNextType = false + + // Display length and capacity if the built-in len and cap functions + // work with the value's kind and the len/cap itself is non-zero. + valueLen, valueCap := 0, 0 + switch v.Kind() { + case reflect.Array, reflect.Slice, reflect.Chan: + valueLen, valueCap = v.Len(), v.Cap() + case reflect.Map, reflect.String: + valueLen = v.Len() + } + if valueLen != 0 || !d.cs.DisableCapacities && valueCap != 0 { + d.w.Write(openParenBytes) + if valueLen != 0 { + d.w.Write(lenEqualsBytes) + printInt(d.w, int64(valueLen), 10) + } + if !d.cs.DisableCapacities && valueCap != 0 { + if valueLen != 0 { + d.w.Write(spaceBytes) + } + d.w.Write(capEqualsBytes) + printInt(d.w, int64(valueCap), 10) + } + d.w.Write(closeParenBytes) + d.w.Write(spaceBytes) + } + + // Call Stringer/error interfaces if they exist and the handle methods flag + // is enabled + if !d.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(d.cs, d.w, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(d.w, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(d.w, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(d.w, v.Uint(), 10) + + case reflect.Float32: + printFloat(d.w, v.Float(), 32) + + case reflect.Float64: + printFloat(d.w, v.Float(), 64) + + case reflect.Complex64: + printComplex(d.w, v.Complex(), 32) + + case reflect.Complex128: + printComplex(d.w, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + d.dumpSlice(v) + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.String: + d.w.Write([]byte(strconv.Quote(v.String()))) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + d.w.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + d.w.Write(nilAngleBytes) + break + } + + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + numEntries := v.Len() + keys := v.MapKeys() + if d.cs.SortKeys { + sortValues(keys, d.cs) + } + for i, key := range keys { + d.dump(d.unpackValue(key)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.MapIndex(key))) + if i < (numEntries - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Struct: + d.w.Write(openBraceNewlineBytes) + d.depth++ + if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) { + d.indent() + d.w.Write(maxNewlineBytes) + } else { + vt := v.Type() + numFields := v.NumField() + for i := 0; i < numFields; i++ { + d.indent() + vtf := vt.Field(i) + d.w.Write([]byte(vtf.Name)) + d.w.Write(colonSpaceBytes) + d.ignoreNextIndent = true + d.dump(d.unpackValue(v.Field(i))) + if i < (numFields - 1) { + d.w.Write(commaNewlineBytes) + } else { + d.w.Write(newlineBytes) + } + } + } + d.depth-- + d.indent() + d.w.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(d.w, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(d.w, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it in case any new + // types are added. + default: + if v.CanInterface() { + fmt.Fprintf(d.w, "%v", v.Interface()) + } else { + fmt.Fprintf(d.w, "%v", v.String()) + } + } +} + +// fdump is a helper function to consolidate the logic from the various public +// methods which take varying writers and config states. +func fdump(cs *ConfigState, w io.Writer, a ...interface{}) { + for _, arg := range a { + if arg == nil { + w.Write(interfaceBytes) + w.Write(spaceBytes) + w.Write(nilAngleBytes) + w.Write(newlineBytes) + continue + } + + d := dumpState{w: w, cs: cs} + d.pointers = make(map[uintptr]int) + d.dump(reflect.ValueOf(arg)) + d.w.Write(newlineBytes) + } +} + +// Fdump formats and displays the passed arguments to io.Writer w. 
It formats
+// exactly the same as Dump.
+func Fdump(w io.Writer, a ...interface{}) {
+ fdump(&Config, w, a...)
+}
+
+// Sdump returns a string with the passed arguments formatted exactly the same
+// as Dump.
+func Sdump(a ...interface{}) string {
+ var buf bytes.Buffer
+ fdump(&Config, &buf, a...)
+ return buf.String()
+}
+
+/*
+Dump displays the passed parameters to standard out with newlines, customizable
+indentation, and additional debug information such as complete types and all
+pointer addresses used to indirect to the final value. It provides the
+following features over the built-in printing facilities provided by the fmt
+package:
+
+ * Pointers are dereferenced and followed
+ * Circular data structures are detected and handled properly
+ * Custom Stringer/error interfaces are optionally invoked, including
+ on unexported types
+ * Custom types which only implement the Stringer/error interfaces via
+ a pointer receiver are optionally invoked when passing non-pointer
+ variables
+ * Byte arrays and slices are dumped like the hexdump -C command which
+ includes offsets, byte values in hex, and ASCII output
+
+The configuration options are controlled by an exported package global,
+spew.Config. See ConfigState for options documentation.
+
+See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
+get the formatted result as a string.
+*/
+func Dump(a ...interface{}) {
+ fdump(&Config, os.Stdout, a...)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
new file mode 100644
index 0000000..6ab1808
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/dumpcgo_test.go
@@ -0,0 +1,99 @@
+// Copyright (c) 2013-2016 Dave Collins
+//
+// Permission to use, copy, modify, and distribute this software for any
+// purpose with or without fee is hereby granted, provided that the above
+// copyright notice and this permission notice appear in all copies.
+//
+// THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+// WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+// MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+// ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+// WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+// ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+// OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+// NOTE: Due to the following build constraints, this file will only be compiled
+// when both cgo is supported and "-tags testcgo" is added to the go test
+// command line. This means the cgo tests are only added (and hence run) when
+// specifically requested. This configuration is used because spew itself
+// does not require cgo to run even though it does handle certain cgo types
+// specially. Rather than forcing all clients to require cgo and an external
+// C compiler just to run the tests, this scheme makes them optional.
+// +build cgo,testcgo

+package spew_test
+
+import (
+ "fmt"
+
+ "github.com/davecgh/go-spew/spew/testdata"
+)
+
+func addCgoDumpTests() {
+ // C char pointer.
+ v := testdata.GetCgoCharPointer()
+ nv := testdata.GetCgoNullCharPointer()
+ pv := &v
+ vcAddr := fmt.Sprintf("%p", v)
+ vAddr := fmt.Sprintf("%p", pv)
+ pvAddr := fmt.Sprintf("%p", &pv)
+ vt := "*testdata._Ctype_char"
+ vs := "116"
+ addDumpTest(v, "("+vt+")("+vcAddr+")("+vs+")\n")
+ addDumpTest(pv, "(*"+vt+")("+vAddr+"->"+vcAddr+")("+vs+")\n")
+ addDumpTest(&pv, "(**"+vt+")("+pvAddr+"->"+vAddr+"->"+vcAddr+")("+vs+")\n")
+ addDumpTest(nv, "("+vt+")(<nil>)\n")
+
+ // C char array.
+ v2, v2l, v2c := testdata.GetCgoCharArray()
+ v2Len := fmt.Sprintf("%d", v2l)
+ v2Cap := fmt.Sprintf("%d", v2c)
+ v2t := "[6]testdata._Ctype_char"
+ v2s := "(len=" + v2Len + " cap=" + v2Cap + ") " +
+ "{\n 00000000 74 65 73 74 32 00 " +
+ " |test2.|\n}"
+ addDumpTest(v2, "("+v2t+") "+v2s+"\n")
+
+ // C unsigned char array.
+ v3, v3l, v3c := testdata.GetCgoUnsignedCharArray()
+ v3Len := fmt.Sprintf("%d", v3l)
+ v3Cap := fmt.Sprintf("%d", v3c)
+ v3t := "[6]testdata._Ctype_unsignedchar"
+ v3t2 := "[6]testdata._Ctype_uchar"
+ v3s := "(len=" + v3Len + " cap=" + v3Cap + ") " +
+ "{\n 00000000 74 65 73 74 33 00 " +
+ " |test3.|\n}"
+ addDumpTest(v3, "("+v3t+") "+v3s+"\n", "("+v3t2+") "+v3s+"\n")
+
+ // C signed char array.
+ v4, v4l, v4c := testdata.GetCgoSignedCharArray()
+ v4Len := fmt.Sprintf("%d", v4l)
+ v4Cap := fmt.Sprintf("%d", v4c)
+ v4t := "[6]testdata._Ctype_schar"
+ v4t2 := "testdata._Ctype_schar"
+ v4s := "(len=" + v4Len + " cap=" + v4Cap + ") " +
+ "{\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 101,\n (" + v4t2 +
+ ") 115,\n (" + v4t2 + ") 116,\n (" + v4t2 + ") 52,\n (" + v4t2 +
+ ") 0\n}"
+ addDumpTest(v4, "("+v4t+") "+v4s+"\n")
+
+ // C uint8_t array.
+ v5, v5l, v5c := testdata.GetCgoUint8tArray()
+ v5Len := fmt.Sprintf("%d", v5l)
+ v5Cap := fmt.Sprintf("%d", v5c)
+ v5t := "[6]testdata._Ctype_uint8_t"
+ v5s := "(len=" + v5Len + " cap=" + v5Cap + ") " +
+ "{\n 00000000 74 65 73 74 35 00 " +
+ " |test5.|\n}"
+ addDumpTest(v5, "("+v5t+") "+v5s+"\n")
+
+ // C typedefed unsigned char array.
+ v6, v6l, v6c := testdata.GetCgoTypdefedUnsignedCharArray()
+ v6Len := fmt.Sprintf("%d", v6l)
+ v6Cap := fmt.Sprintf("%d", v6c)
+ v6t := "[6]testdata._Ctype_custom_uchar_t"
+ v6s := "(len=" + v6Len + " cap=" + v6Cap + ") " +
+ "{\n 00000000 74 65 73 74 36 00 " +
+ " |test6.|\n}"
+ addDumpTest(v6, "("+v6t+") "+v6s+"\n")
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/format.go b/vendor/github.com/davecgh/go-spew/spew/format.go
new file mode 100644
index 0000000..b04edb7
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/format.go
@@ -0,0 +1,419 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "bytes"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+)
+
+// supportedFlags is a list of all the character flags supported by the fmt package.
+const supportedFlags = "0-+# "
+
+// formatState implements the fmt.Formatter interface and contains information
+// about the state of a formatting operation. The NewFormatter function can
+// be used to get a new Formatter which can be used directly as an argument
+// in standard fmt package printing calls.
+type formatState struct {
+ value interface{}
+ fs fmt.State
+ depth int
+ pointers map[uintptr]int
+ ignoreNextType bool
+ cs *ConfigState
+}
+
+// buildDefaultFormat recreates the original format string without precision
+// and width information to pass in to fmt.Sprintf in the case of an
+// unrecognized type. Unless new types are added to the language, this
+// function won't ever be called.
+func (f *formatState) buildDefaultFormat() (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ buf.WriteRune('v')
+
+ format = buf.String()
+ return format
+}
+
+// constructOrigFormat recreates the original format string including precision
+// and width information to pass along to the standard fmt package. This allows
+// automatic deferral of all format strings this package doesn't support.
+func (f *formatState) constructOrigFormat(verb rune) (format string) {
+ buf := bytes.NewBuffer(percentBytes)
+
+ for _, flag := range supportedFlags {
+ if f.fs.Flag(int(flag)) {
+ buf.WriteRune(flag)
+ }
+ }
+
+ if width, ok := f.fs.Width(); ok {
+ buf.WriteString(strconv.Itoa(width))
+ }
+
+ if precision, ok := f.fs.Precision(); ok {
+ buf.Write(precisionBytes)
+ buf.WriteString(strconv.Itoa(precision))
+ }
+
+ buf.WriteRune(verb)
+
+ format = buf.String()
+ return format
+}
+
+// unpackValue returns values inside of non-nil interfaces when possible and
+// ensures that types for values which have been unpacked from an interface
+// are displayed when the show types flag is also set.
+// This is useful for data types like structs, arrays, slices, and maps which
+// can contain varying types packed inside an interface.
+func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
+ if v.Kind() == reflect.Interface {
+ f.ignoreNextType = false
+ if !v.IsNil() {
+ v = v.Elem()
+ }
+ }
+ return v
+}
+
+// formatPtr handles formatting of pointers by indirecting them as necessary.
+func (f *formatState) formatPtr(v reflect.Value) {
+ // Display nil if top level pointer is nil.
+ showTypes := f.fs.Flag('#')
+ if v.IsNil() && (!showTypes || f.ignoreNextType) {
+ f.fs.Write(nilAngleBytes)
+ return
+ }
+
+ // Remove pointers at or below the current depth from map used to detect
+ // circular refs.
+ for k, depth := range f.pointers {
+ if depth >= f.depth {
+ delete(f.pointers, k)
+ }
+ }
+
+ // Keep list of all dereferenced pointers to possibly show later.
+ pointerChain := make([]uintptr, 0)
+
+ // Figure out how many levels of indirection there are by dereferencing
+ // pointers and unpacking interfaces down the chain while detecting circular
+ // references.
+ nilFound := false + cycleFound := false + indirects := 0 + ve := v + for ve.Kind() == reflect.Ptr { + if ve.IsNil() { + nilFound = true + break + } + indirects++ + addr := ve.Pointer() + pointerChain = append(pointerChain, addr) + if pd, ok := f.pointers[addr]; ok && pd < f.depth { + cycleFound = true + indirects-- + break + } + f.pointers[addr] = f.depth + + ve = ve.Elem() + if ve.Kind() == reflect.Interface { + if ve.IsNil() { + nilFound = true + break + } + ve = ve.Elem() + } + } + + // Display type or indirection level depending on flags. + if showTypes && !f.ignoreNextType { + f.fs.Write(openParenBytes) + f.fs.Write(bytes.Repeat(asteriskBytes, indirects)) + f.fs.Write([]byte(ve.Type().String())) + f.fs.Write(closeParenBytes) + } else { + if nilFound || cycleFound { + indirects += strings.Count(ve.Type().String(), "*") + } + f.fs.Write(openAngleBytes) + f.fs.Write([]byte(strings.Repeat("*", indirects))) + f.fs.Write(closeAngleBytes) + } + + // Display pointer information depending on flags. + if f.fs.Flag('+') && (len(pointerChain) > 0) { + f.fs.Write(openParenBytes) + for i, addr := range pointerChain { + if i > 0 { + f.fs.Write(pointerChainBytes) + } + printHexPtr(f.fs, addr) + } + f.fs.Write(closeParenBytes) + } + + // Display dereferenced value. + switch { + case nilFound: + f.fs.Write(nilAngleBytes) + + case cycleFound: + f.fs.Write(circularShortBytes) + + default: + f.ignoreNextType = true + f.format(ve) + } +} + +// format is the main workhorse for providing the Formatter interface. It +// uses the passed reflect value to figure out what kind of object we are +// dealing with and formats it appropriately. It is a recursive function, +// however circular data structures are detected and handled properly. +func (f *formatState) format(v reflect.Value) { + // Handle invalid reflect values immediately. + kind := v.Kind() + if kind == reflect.Invalid { + f.fs.Write(invalidAngleBytes) + return + } + + // Handle pointers specially. + if kind == reflect.Ptr { + f.formatPtr(v) + return + } + + // Print type information unless already handled elsewhere. + if !f.ignoreNextType && f.fs.Flag('#') { + f.fs.Write(openParenBytes) + f.fs.Write([]byte(v.Type().String())) + f.fs.Write(closeParenBytes) + } + f.ignoreNextType = false + + // Call Stringer/error interfaces if they exist and the handle methods + // flag is enabled. + if !f.cs.DisableMethods { + if (kind != reflect.Invalid) && (kind != reflect.Interface) { + if handled := handleMethods(f.cs, f.fs, v); handled { + return + } + } + } + + switch kind { + case reflect.Invalid: + // Do nothing. We should never get here since invalid has already + // been handled above. 
+ + case reflect.Bool: + printBool(f.fs, v.Bool()) + + case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int: + printInt(f.fs, v.Int(), 10) + + case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint: + printUint(f.fs, v.Uint(), 10) + + case reflect.Float32: + printFloat(f.fs, v.Float(), 32) + + case reflect.Float64: + printFloat(f.fs, v.Float(), 64) + + case reflect.Complex64: + printComplex(f.fs, v.Complex(), 32) + + case reflect.Complex128: + printComplex(f.fs, v.Complex(), 64) + + case reflect.Slice: + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + fallthrough + + case reflect.Array: + f.fs.Write(openBracketBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + numEntries := v.Len() + for i := 0; i < numEntries; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(v.Index(i))) + } + } + f.depth-- + f.fs.Write(closeBracketBytes) + + case reflect.String: + f.fs.Write([]byte(v.String())) + + case reflect.Interface: + // The only time we should get here is for nil interfaces due to + // unpackValue calls. + if v.IsNil() { + f.fs.Write(nilAngleBytes) + } + + case reflect.Ptr: + // Do nothing. We should never get here since pointers have already + // been handled above. + + case reflect.Map: + // nil maps should be indicated as different than empty maps + if v.IsNil() { + f.fs.Write(nilAngleBytes) + break + } + + f.fs.Write(openMapBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + keys := v.MapKeys() + if f.cs.SortKeys { + sortValues(keys, f.cs) + } + for i, key := range keys { + if i > 0 { + f.fs.Write(spaceBytes) + } + f.ignoreNextType = true + f.format(f.unpackValue(key)) + f.fs.Write(colonBytes) + f.ignoreNextType = true + f.format(f.unpackValue(v.MapIndex(key))) + } + } + f.depth-- + f.fs.Write(closeMapBytes) + + case reflect.Struct: + numFields := v.NumField() + f.fs.Write(openBraceBytes) + f.depth++ + if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) { + f.fs.Write(maxShortBytes) + } else { + vt := v.Type() + for i := 0; i < numFields; i++ { + if i > 0 { + f.fs.Write(spaceBytes) + } + vtf := vt.Field(i) + if f.fs.Flag('+') || f.fs.Flag('#') { + f.fs.Write([]byte(vtf.Name)) + f.fs.Write(colonBytes) + } + f.format(f.unpackValue(v.Field(i))) + } + } + f.depth-- + f.fs.Write(closeBraceBytes) + + case reflect.Uintptr: + printHexPtr(f.fs, uintptr(v.Uint())) + + case reflect.UnsafePointer, reflect.Chan, reflect.Func: + printHexPtr(f.fs, v.Pointer()) + + // There were not any other types at the time this code was written, but + // fall back to letting the default fmt package handle it if any get added. + default: + format := f.buildDefaultFormat() + if v.CanInterface() { + fmt.Fprintf(f.fs, format, v.Interface()) + } else { + fmt.Fprintf(f.fs, format, v.String()) + } + } +} + +// Format satisfies the fmt.Formatter interface. See NewFormatter for usage +// details. +func (f *formatState) Format(fs fmt.State, verb rune) { + f.fs = fs + + // Use standard formatting for verbs that are not v. 
+ if verb != 'v' {
+ format := f.constructOrigFormat(verb)
+ fmt.Fprintf(fs, format, f.value)
+ return
+ }
+
+ if f.value == nil {
+ if fs.Flag('#') {
+ fs.Write(interfaceBytes)
+ }
+ fs.Write(nilAngleBytes)
+ return
+ }
+
+ f.format(reflect.ValueOf(f.value))
+}
+
+// newFormatter is a helper function to consolidate the logic from the various
+// public methods which take varying config states.
+func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
+ fs := &formatState{value: v, cs: cs}
+ fs.pointers = make(map[uintptr]int)
+ return fs
+}
+
+/*
+NewFormatter returns a custom formatter that satisfies the fmt.Formatter
+interface. As a result, it integrates cleanly with standard fmt package
+printing functions. The formatter is useful for inline printing of smaller data
+types similar to the standard %v format specifier.
+
+The custom formatter only responds to the %v (most compact), %+v (adds pointer
+addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
+combinations. Any other verbs such as %x and %q will be sent to the
+standard fmt package for formatting. In addition, the custom formatter ignores
+the width and precision arguments (however they will still work on the format
+specifiers not handled by the custom formatter).
+
+Typically this function shouldn't be called directly. It is much easier to make
+use of the custom formatter by calling one of the convenience functions such as
+Printf, Println, or Fprintf.
+*/
+func NewFormatter(v interface{}) fmt.Formatter {
+ return newFormatter(&Config, v)
+}
diff --git a/vendor/github.com/davecgh/go-spew/spew/spew.go b/vendor/github.com/davecgh/go-spew/spew/spew.go
new file mode 100644
index 0000000..32c0e33
--- /dev/null
+++ b/vendor/github.com/davecgh/go-spew/spew/spew.go
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2013-2016 Dave Collins
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+package spew
+
+import (
+ "fmt"
+ "io"
+)
+
+// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the formatted string as a value that satisfies error. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Errorf(format string, a ...interface{}) (err error) {
+ return fmt.Errorf(format, convertArgs(a)...)
+}
+
+// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprint(w, convertArgs(a)...)
+}
+
+// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
+ return fmt.Fprintf(w, format, convertArgs(a)...)
+}
+
+// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
+// were passed with a default Formatter interface returned by NewFormatter. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
+func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
+ return fmt.Fprintln(w, convertArgs(a)...)
+}
+
+// Print is a wrapper for fmt.Print that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
+func Print(a ...interface{}) (n int, err error) {
+ return fmt.Print(convertArgs(a)...)
+}
+
+// Printf is a wrapper for fmt.Printf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
+func Printf(format string, a ...interface{}) (n int, err error) {
+ return fmt.Printf(format, convertArgs(a)...)
+}
+
+// Println is a wrapper for fmt.Println that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the number of bytes written and any write error encountered. See
+// NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
+func Println(a ...interface{}) (n int, err error) {
+ return fmt.Println(convertArgs(a)...)
+}
+
+// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+//
+// This function is shorthand for the following syntax:
+//
+// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
+func Sprint(a ...interface{}) string {
+ return fmt.Sprint(convertArgs(a)...)
+}
+
+// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
+// passed with a default Formatter interface returned by NewFormatter. It
+// returns the resulting string. See NewFormatter for formatting details.
+// +// This function is shorthand for the following syntax: +// +// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintf(format string, a ...interface{}) string { + return fmt.Sprintf(format, convertArgs(a)...) +} + +// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it +// were passed with a default Formatter interface returned by NewFormatter. It +// returns the resulting string. See NewFormatter for formatting details. +// +// This function is shorthand for the following syntax: +// +// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b)) +func Sprintln(a ...interface{}) string { + return fmt.Sprintln(convertArgs(a)...) +} + +// convertArgs accepts a slice of arguments and returns a slice of the same +// length with each argument converted to a default spew Formatter interface. +func convertArgs(args []interface{}) (formatters []interface{}) { + formatters = make([]interface{}, len(args)) + for index, arg := range args { + formatters[index] = NewFormatter(arg) + } + return formatters +} diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go new file mode 100644 index 0000000..f88ca72 --- /dev/null +++ b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go @@ -0,0 +1,161 @@ +// Package breaker implements the circuit-breaker resiliency pattern for Go. +package breaker + +import ( + "errors" + "sync" + "sync/atomic" + "time" +) + +// ErrBreakerOpen is the error returned from Run() when the function is not executed +// because the breaker is currently open. +var ErrBreakerOpen = errors.New("circuit breaker is open") + +const ( + closed uint32 = iota + open + halfOpen +) + +// Breaker implements the circuit-breaker resiliency pattern +type Breaker struct { + errorThreshold, successThreshold int + timeout time.Duration + + lock sync.Mutex + state uint32 + errors, successes int + lastError time.Time +} + +// New constructs a new circuit-breaker that starts closed. +// From closed, the breaker opens if "errorThreshold" errors are seen +// without an error-free period of at least "timeout". From open, the +// breaker half-closes after "timeout". From half-open, the breaker closes +// after "successThreshold" consecutive successes, or opens on a single error. +func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker { + return &Breaker{ + errorThreshold: errorThreshold, + successThreshold: successThreshold, + timeout: timeout, + } +} + +// Run will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function and pass along its return +// value. It is safe to call Run concurrently on the same Breaker. +func (b *Breaker) Run(work func() error) error { + state := atomic.LoadUint32(&b.state) + + if state == open { + return ErrBreakerOpen + } + + return b.doWork(state, work) +} + +// Go will either return ErrBreakerOpen immediately if the circuit-breaker is +// already open, or it will run the given function in a separate goroutine. +// If the function is run, Go will return nil immediately, and will *not* return +// the return value of the function. It is safe to call Go concurrently on the +// same Breaker. 
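+//
+// As an illustrative sketch (callRemoteService is a hypothetical function,
+// not part of this package), a caller might fire work off without waiting
+// on its result:
+//
+//   b := breaker.New(3, 1, 5*time.Second)
+//   err := b.Go(func() error { return callRemoteService() })
+//   if err == breaker.ErrBreakerOpen {
+//     // the breaker is open; skip the work and retry later
+//   }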
+func (b *Breaker) Go(work func() error) error {
+ state := atomic.LoadUint32(&b.state)
+
+ if state == open {
+ return ErrBreakerOpen
+ }
+
+ // errcheck complains about ignoring the error return value, but
+ // that's on purpose; if you want an error from a goroutine you have to
+ // get it over a channel or something
+ go b.doWork(state, work)
+
+ return nil
+}
+
+func (b *Breaker) doWork(state uint32, work func() error) error {
+ var panicValue interface{}
+
+ result := func() error {
+ defer func() {
+ panicValue = recover()
+ }()
+ return work()
+ }()
+
+ if result == nil && panicValue == nil && state == closed {
+ // short-circuit the normal, success path without contending
+ // on the lock
+ return nil
+ }
+
+ // oh well, I guess we have to contend on the lock
+ b.processResult(result, panicValue)
+
+ if panicValue != nil {
+ // as close as Go lets us come to a "rethrow" although unfortunately
+ // we lose the original panicking location
+ panic(panicValue)
+ }
+
+ return result
+}
+
+func (b *Breaker) processResult(result error, panicValue interface{}) {
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ if result == nil && panicValue == nil {
+ if b.state == halfOpen {
+ b.successes++
+ if b.successes == b.successThreshold {
+ b.closeBreaker()
+ }
+ }
+ } else {
+ if b.errors > 0 {
+ expiry := b.lastError.Add(b.timeout)
+ if time.Now().After(expiry) {
+ b.errors = 0
+ }
+ }
+
+ switch b.state {
+ case closed:
+ b.errors++
+ if b.errors == b.errorThreshold {
+ b.openBreaker()
+ } else {
+ b.lastError = time.Now()
+ }
+ case halfOpen:
+ b.openBreaker()
+ }
+ }
+}
+
+func (b *Breaker) openBreaker() {
+ b.changeState(open)
+ go b.timer()
+}
+
+func (b *Breaker) closeBreaker() {
+ b.changeState(closed)
+}
+
+func (b *Breaker) timer() {
+ time.Sleep(b.timeout)
+
+ b.lock.Lock()
+ defer b.lock.Unlock()
+
+ b.changeState(halfOpen)
+}
+
+func (b *Breaker) changeState(newState uint32) {
+ b.errors = 0
+ b.successes = 0
+ atomic.StoreUint32(&b.state, newState)
+}
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
new file mode 100644
index 0000000..b8f8b51
--- /dev/null
+++ b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
@@ -0,0 +1,43 @@
+package snappy
+
+import (
+ "bytes"
+ "encoding/binary"
+
+ master "github.com/golang/snappy"
+)
+
+var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
+
+// Encode encodes data as snappy with no framing header.
+func Encode(src []byte) []byte {
+ return master.Encode(nil, src)
+}
+
+// Decode decodes snappy data whether it is traditional unframed
+// or includes the xerial framing format.
+func Decode(src []byte) ([]byte, error) {
+ if !bytes.Equal(src[:8], xerialHeader) {
+ return master.Decode(nil, src)
+ }
+
+ var (
+ pos = uint32(16)
+ max = uint32(len(src))
+ dst = make([]byte, 0, len(src))
+ chunk []byte
+ err error
+ )
+ for pos < max {
+ size := binary.BigEndian.Uint32(src[pos : pos+4])
+ pos += 4
+
+ chunk, err = master.Decode(chunk, src[pos:pos+size])
+ if err != nil {
+ return nil, err
+ }
+ pos += size
+ dst = append(dst, chunk...)
+ }
+ return dst, nil
+}
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go
new file mode 100644
index 0000000..71d1acd
--- /dev/null
+++ b/vendor/github.com/eapache/queue/queue.go
@@ -0,0 +1,102 @@
+/*
+Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
+Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
+substantial memory and time benefits, and fewer GC pauses.
+
+The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
+*/
+package queue
+
+// minQueueLen is the smallest capacity that the queue may have.
+// Must be a power of 2 for bitwise modulus: x % n == x & (n - 1).
+const minQueueLen = 16
+
+// Queue represents a single instance of the queue data structure.
+type Queue struct {
+ buf []interface{}
+ head, tail, count int
+}
+
+// New constructs and returns a new Queue.
+func New() *Queue {
+ return &Queue{
+ buf: make([]interface{}, minQueueLen),
+ }
+}
+
+// Length returns the number of elements currently stored in the queue.
+func (q *Queue) Length() int {
+ return q.count
+}
+
+// resize resizes the queue to fit exactly twice its current contents. This
+// can result in shrinking if the queue is less than half-full.
+func (q *Queue) resize() {
+ newBuf := make([]interface{}, q.count<<1)
+
+ if q.tail > q.head {
+ copy(newBuf, q.buf[q.head:q.tail])
+ } else {
+ n := copy(newBuf, q.buf[q.head:])
+ copy(newBuf[n:], q.buf[:q.tail])
+ }
+
+ q.head = 0
+ q.tail = q.count
+ q.buf = newBuf
+}
+
+// Add puts an element on the end of the queue.
+func (q *Queue) Add(elem interface{}) {
+ if q.count == len(q.buf) {
+ q.resize()
+ }
+
+ q.buf[q.tail] = elem
+ // bitwise modulus
+ q.tail = (q.tail + 1) & (len(q.buf) - 1)
+ q.count++
+}
+
+// Peek returns the element at the head of the queue. This call panics
+// if the queue is empty.
+func (q *Queue) Peek() interface{} {
+ if q.count <= 0 {
+ panic("queue: Peek() called on empty queue")
+ }
+ return q.buf[q.head]
+}
+
+// Get returns the element at index i in the queue. If the index is
+// invalid, the call will panic. This method accepts both positive and
+// negative index values. Index 0 refers to the first element, and
+// index -1 refers to the last.
+func (q *Queue) Get(i int) interface{} {
+ // If indexing backwards, convert to positive index.
+ if i < 0 {
+ i += q.count
+ }
+ if i < 0 || i >= q.count {
+ panic("queue: Get() called with index out of range")
+ }
+ // bitwise modulus
+ return q.buf[(q.head+i)&(len(q.buf)-1)]
+}
+
+// Remove removes and returns the element from the front of the queue. If the
+// queue is empty, the call will panic.
+func (q *Queue) Remove() interface{} {
+ if q.count <= 0 {
+ panic("queue: Remove() called on empty queue")
+ }
+ ret := q.buf[q.head]
+ q.buf[q.head] = nil
+ // bitwise modulus
+ q.head = (q.head + 1) & (len(q.buf) - 1)
+ q.count--
+ // Resize down if buffer is 1/4 full.
+ if len(q.buf) > minQueueLen && (q.count<<2) == len(q.buf) {
+ q.resize()
+ }
+ return ret
+}
diff --git a/vendor/github.com/go-logfmt/logfmt/decode.go b/vendor/github.com/go-logfmt/logfmt/decode.go
new file mode 100644
index 0000000..04e0eff
--- /dev/null
+++ b/vendor/github.com/go-logfmt/logfmt/decode.go
@@ -0,0 +1,237 @@
+package logfmt
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "unicode/utf8"
+)
+
+// A Decoder reads and decodes logfmt records from an input stream.
+type Decoder struct {
+ pos int
+ key []byte
+ value []byte
+ lineNum int
+ s *bufio.Scanner
+ err error
+}
+
+// NewDecoder returns a new decoder that reads from r.
+//
+// The decoder introduces its own buffering and may read data from r beyond
+// the logfmt records requested.
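+//
+// A minimal sketch of the intended decoding loop (r is a hypothetical
+// io.Reader supplying logfmt records):
+//
+//   dec := logfmt.NewDecoder(r)
+//   for dec.ScanRecord() {
+//     for dec.ScanKeyval() {
+//       fmt.Printf("%s=%s ", dec.Key(), dec.Value())
+//     }
+//     fmt.Println()
+//   }
+//   if dec.Err() != nil {
+//     // handle the read or syntax error
+//   }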
+func NewDecoder(r io.Reader) *Decoder { + dec := &Decoder{ + s: bufio.NewScanner(r), + } + return dec +} + +// ScanRecord advances the Decoder to the next record, which can then be +// parsed with the ScanKeyval method. It returns false when decoding stops, +// either by reaching the end of the input or an error. After ScanRecord +// returns false, the Err method will return any error that occurred during +// decoding, except that if it was io.EOF, Err will return nil. +func (dec *Decoder) ScanRecord() bool { + if dec.err != nil { + return false + } + if !dec.s.Scan() { + dec.err = dec.s.Err() + return false + } + dec.lineNum++ + dec.pos = 0 + return true +} + +// ScanKeyval advances the Decoder to the next key/value pair of the current +// record, which can then be retrieved with the Key and Value methods. It +// returns false when decoding stops, either by reaching the end of the +// current record or an error. +func (dec *Decoder) ScanKeyval() bool { + dec.key, dec.value = nil, nil + if dec.err != nil { + return false + } + + line := dec.s.Bytes() + + // garbage + for p, c := range line[dec.pos:] { + if c > ' ' { + dec.pos += p + goto key + } + } + dec.pos = len(line) + return false + +key: + const invalidKeyError = "invalid key" + + start, multibyte := dec.pos, false + for p, c := range line[dec.pos:] { + switch { + case c == '=': + dec.pos += p + if dec.pos > start { + dec.key = line[start:dec.pos] + if multibyte && bytes.IndexRune(dec.key, utf8.RuneError) != -1 { + dec.syntaxError(invalidKeyError) + return false + } + } + if dec.key == nil { + dec.unexpectedByte(c) + return false + } + goto equal + case c == '"': + dec.pos += p + dec.unexpectedByte(c) + return false + case c <= ' ': + dec.pos += p + if dec.pos > start { + dec.key = line[start:dec.pos] + if multibyte && bytes.IndexRune(dec.key, utf8.RuneError) != -1 { + dec.syntaxError(invalidKeyError) + return false + } + } + return true + case c >= utf8.RuneSelf: + multibyte = true + } + } + dec.pos = len(line) + if dec.pos > start { + dec.key = line[start:dec.pos] + if multibyte && bytes.IndexRune(dec.key, utf8.RuneError) != -1 { + dec.syntaxError(invalidKeyError) + return false + } + } + return true + +equal: + dec.pos++ + if dec.pos >= len(line) { + return true + } + switch c := line[dec.pos]; { + case c <= ' ': + return true + case c == '"': + goto qvalue + } + + // value + start = dec.pos + for p, c := range line[dec.pos:] { + switch { + case c == '=' || c == '"': + dec.pos += p + dec.unexpectedByte(c) + return false + case c <= ' ': + dec.pos += p + if dec.pos > start { + dec.value = line[start:dec.pos] + } + return true + } + } + dec.pos = len(line) + if dec.pos > start { + dec.value = line[start:dec.pos] + } + return true + +qvalue: + const ( + untermQuote = "unterminated quoted value" + invalidQuote = "invalid quoted value" + ) + + hasEsc, esc := false, false + start = dec.pos + for p, c := range line[dec.pos+1:] { + switch { + case esc: + esc = false + case c == '\\': + hasEsc, esc = true, true + case c == '"': + dec.pos += p + 2 + if hasEsc { + v, ok := unquoteBytes(line[start:dec.pos]) + if !ok { + dec.syntaxError(invalidQuote) + return false + } + dec.value = v + } else { + start++ + end := dec.pos - 1 + if end > start { + dec.value = line[start:end] + } + } + return true + } + } + dec.pos = len(line) + dec.syntaxError(untermQuote) + return false +} + +// Key returns the most recent key found by a call to ScanKeyval. 
The returned +// slice may point to internal buffers and is only valid until the next call +// to ScanRecord. It does no allocation. +func (dec *Decoder) Key() []byte { + return dec.key +} + +// Value returns the most recent value found by a call to ScanKeyval. The +// returned slice may point to internal buffers and is only valid until the +// next call to ScanRecord. It does no allocation when the value has no +// escape sequences. +func (dec *Decoder) Value() []byte { + return dec.value +} + +// Err returns the first non-EOF error that was encountered by the Scanner. +func (dec *Decoder) Err() error { + return dec.err +} + +func (dec *Decoder) syntaxError(msg string) { + dec.err = &SyntaxError{ + Msg: msg, + Line: dec.lineNum, + Pos: dec.pos + 1, + } +} + +func (dec *Decoder) unexpectedByte(c byte) { + dec.err = &SyntaxError{ + Msg: fmt.Sprintf("unexpected %q", c), + Line: dec.lineNum, + Pos: dec.pos + 1, + } +} + +// A SyntaxError represents a syntax error in the logfmt input stream. +type SyntaxError struct { + Msg string + Line int + Pos int +} + +func (e *SyntaxError) Error() string { + return fmt.Sprintf("logfmt syntax error at pos %d on line %d: %s", e.Pos, e.Line, e.Msg) +} diff --git a/vendor/github.com/go-logfmt/logfmt/doc.go b/vendor/github.com/go-logfmt/logfmt/doc.go new file mode 100644 index 0000000..378e9ad --- /dev/null +++ b/vendor/github.com/go-logfmt/logfmt/doc.go @@ -0,0 +1,6 @@ +// Package logfmt implements utilities to marshal and unmarshal data in the +// logfmt format. The logfmt format records key/value pairs in a way that +// balances readability for humans and simplicity of computer parsing. It is +// most commonly used as a more human friendly alternative to JSON for +// structured logging. +package logfmt diff --git a/vendor/github.com/go-logfmt/logfmt/encode.go b/vendor/github.com/go-logfmt/logfmt/encode.go new file mode 100644 index 0000000..55f1603 --- /dev/null +++ b/vendor/github.com/go-logfmt/logfmt/encode.go @@ -0,0 +1,321 @@ +package logfmt + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "io" + "reflect" + "strings" + "unicode/utf8" +) + +// MarshalKeyvals returns the logfmt encoding of keyvals, a variadic sequence +// of alternating keys and values. +func MarshalKeyvals(keyvals ...interface{}) ([]byte, error) { + buf := &bytes.Buffer{} + if err := NewEncoder(buf).EncodeKeyvals(keyvals...); err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// An Encoder writes logfmt data to an output stream. +type Encoder struct { + w io.Writer + scratch bytes.Buffer + needSep bool +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + w: w, + } +} + +var ( + space = []byte(" ") + equals = []byte("=") + newline = []byte("\n") + null = []byte("null") +) + +// EncodeKeyval writes the logfmt encoding of key and value to the stream. A +// single space is written before the second and subsequent keys in a record. +// Nothing is written if a non-nil error is returned. 
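+//
+// As an illustrative sketch (error handling elided), encoding two pairs and
+// ending the record:
+//
+//   enc := logfmt.NewEncoder(os.Stdout)
+//   enc.EncodeKeyval("level", "info")
+//   enc.EncodeKeyval("msg", "hello world")
+//   enc.EndRecord() // writes: level=info msg="hello world"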
+func (enc *Encoder) EncodeKeyval(key, value interface{}) error { + enc.scratch.Reset() + if enc.needSep { + if _, err := enc.scratch.Write(space); err != nil { + return err + } + } + if err := writeKey(&enc.scratch, key); err != nil { + return err + } + if _, err := enc.scratch.Write(equals); err != nil { + return err + } + if err := writeValue(&enc.scratch, value); err != nil { + return err + } + _, err := enc.w.Write(enc.scratch.Bytes()) + enc.needSep = true + return err +} + +// EncodeKeyvals writes the logfmt encoding of keyvals to the stream. Keyvals +// is a variadic sequence of alternating keys and values. Keys of unsupported +// type are skipped along with their corresponding value. Values of +// unsupported type or that cause a MarshalerError are replaced by their error +// but do not cause EncodeKeyvals to return an error. If a non-nil error is +// returned, some key/value pairs may not have been written. +func (enc *Encoder) EncodeKeyvals(keyvals ...interface{}) error { + if len(keyvals) == 0 { + return nil + } + if len(keyvals)%2 == 1 { + keyvals = append(keyvals, nil) + } + for i := 0; i < len(keyvals); i += 2 { + k, v := keyvals[i], keyvals[i+1] + err := enc.EncodeKeyval(k, v) + if err == ErrUnsupportedKeyType { + continue + } + if _, ok := err.(*MarshalerError); ok || err == ErrUnsupportedValueType { + v = err + err = enc.EncodeKeyval(k, v) + } + if err != nil { + return err + } + } + return nil +} + +// MarshalerError represents an error encountered while marshaling a value. +type MarshalerError struct { + Type reflect.Type + Err error +} + +func (e *MarshalerError) Error() string { + return "error marshaling value of type " + e.Type.String() + ": " + e.Err.Error() +} + +// ErrNilKey is returned by Marshal functions and Encoder methods if a key is +// a nil interface or pointer value. +var ErrNilKey = errors.New("nil key") + +// ErrInvalidKey is returned by Marshal functions and Encoder methods if a key +// contains an invalid character. +var ErrInvalidKey = errors.New("invalid key") + +// ErrUnsupportedKeyType is returned by Encoder methods if a key has an +// unsupported type. +var ErrUnsupportedKeyType = errors.New("unsupported key type") + +// ErrUnsupportedValueType is returned by Encoder methods if a value has an +// unsupported type.
+var ErrUnsupportedValueType = errors.New("unsupported value type") + +func writeKey(w io.Writer, key interface{}) error { + if key == nil { + return ErrNilKey + } + + switch k := key.(type) { + case string: + return writeStringKey(w, k) + case []byte: + if k == nil { + return ErrNilKey + } + return writeBytesKey(w, k) + case encoding.TextMarshaler: + kb, err := safeMarshal(k) + if err != nil { + return err + } + if kb == nil { + return ErrNilKey + } + return writeBytesKey(w, kb) + case fmt.Stringer: + ks, ok := safeString(k) + if !ok { + return ErrNilKey + } + return writeStringKey(w, ks) + default: + rkey := reflect.ValueOf(key) + switch rkey.Kind() { + case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct: + return ErrUnsupportedKeyType + case reflect.Ptr: + if rkey.IsNil() { + return ErrNilKey + } + return writeKey(w, rkey.Elem().Interface()) + } + return writeStringKey(w, fmt.Sprint(k)) + } +} + +func invalidKeyRune(r rune) bool { + return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError +} + +func invalidKeyString(key string) bool { + return len(key) == 0 || strings.IndexFunc(key, invalidKeyRune) != -1 +} + +func invalidKey(key []byte) bool { + return len(key) == 0 || bytes.IndexFunc(key, invalidKeyRune) != -1 +} + +func writeStringKey(w io.Writer, key string) error { + if invalidKeyString(key) { + return ErrInvalidKey + } + _, err := io.WriteString(w, key) + return err +} + +func writeBytesKey(w io.Writer, key []byte) error { + if invalidKey(key) { + return ErrInvalidKey + } + _, err := w.Write(key) + return err +} + +func writeValue(w io.Writer, value interface{}) error { + switch v := value.(type) { + case nil: + return writeBytesValue(w, null) + case string: + return writeStringValue(w, v, true) + case []byte: + return writeBytesValue(w, v) + case encoding.TextMarshaler: + vb, err := safeMarshal(v) + if err != nil { + return err + } + if vb == nil { + vb = null + } + return writeBytesValue(w, vb) + case error: + se, ok := safeError(v) + return writeStringValue(w, se, ok) + case fmt.Stringer: + ss, ok := safeString(v) + return writeStringValue(w, ss, ok) + default: + rvalue := reflect.ValueOf(value) + switch rvalue.Kind() { + case reflect.Array, reflect.Chan, reflect.Func, reflect.Map, reflect.Slice, reflect.Struct: + return ErrUnsupportedValueType + case reflect.Ptr: + if rvalue.IsNil() { + return writeBytesValue(w, null) + } + return writeValue(w, rvalue.Elem().Interface()) + } + return writeStringValue(w, fmt.Sprint(v), true) + } +} + +func needsQuotedValueRune(r rune) bool { + return r <= ' ' || r == '=' || r == '"' || r == utf8.RuneError +} + +func writeStringValue(w io.Writer, value string, ok bool) error { + var err error + if ok && value == "null" { + _, err = io.WriteString(w, `"null"`) + } else if strings.IndexFunc(value, needsQuotedValueRune) != -1 { + _, err = writeQuotedString(w, value) + } else { + _, err = io.WriteString(w, value) + } + return err +} + +func writeBytesValue(w io.Writer, value []byte) error { + var err error + if bytes.IndexFunc(value, needsQuotedValueRune) != -1 { + _, err = writeQuotedBytes(w, value) + } else { + _, err = w.Write(value) + } + return err +} + +// EndRecord writes a newline character to the stream and resets the encoder +// to the beginning of a new record. +func (enc *Encoder) EndRecord() error { + _, err := enc.w.Write(newline) + if err == nil { + enc.needSep = false + } + return err +} + +// Reset resets the encoder to the beginning of a new record. 
+func (enc *Encoder) Reset() { + enc.needSep = false +} + +func safeError(err error) (s string, ok bool) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(err); v.Kind() == reflect.Ptr && v.IsNil() { + s, ok = "null", false + } else { + panic(panicVal) + } + } + }() + s, ok = err.Error(), true + return +} + +func safeString(str fmt.Stringer) (s string, ok bool) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(str); v.Kind() == reflect.Ptr && v.IsNil() { + s, ok = "null", false + } else { + panic(panicVal) + } + } + }() + s, ok = str.String(), true + return +} + +func safeMarshal(tm encoding.TextMarshaler) (b []byte, err error) { + defer func() { + if panicVal := recover(); panicVal != nil { + if v := reflect.ValueOf(tm); v.Kind() == reflect.Ptr && v.IsNil() { + b, err = nil, nil + } else { + panic(panicVal) + } + } + }() + b, err = tm.MarshalText() + if err != nil { + return nil, &MarshalerError{ + Type: reflect.TypeOf(tm), + Err: err, + } + } + return +} diff --git a/vendor/github.com/go-logfmt/logfmt/fuzz.go b/vendor/github.com/go-logfmt/logfmt/fuzz.go new file mode 100644 index 0000000..6553b35 --- /dev/null +++ b/vendor/github.com/go-logfmt/logfmt/fuzz.go @@ -0,0 +1,126 @@ +// +build gofuzz + +package logfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "reflect" + + kr "github.com/kr/logfmt" +) + +// Fuzz checks that reserialized data matches across round trips. +func Fuzz(data []byte) int { + parsed, err := parse(data) + if err != nil { + return 0 + } + var w1 bytes.Buffer + if err = write(parsed, &w1); err != nil { + panic(err) + } + parsed, err = parse(w1.Bytes()) + if err != nil { + panic(err) + } + var w2 bytes.Buffer + if err = write(parsed, &w2); err != nil { + panic(err) + } + if !bytes.Equal(w1.Bytes(), w2.Bytes()) { + panic(fmt.Sprintf("reserialized data does not match:\n%q\n%q\n", w1.Bytes(), w2.Bytes())) + } + return 1 +} + +// FuzzVsKR checks go-logfmt/logfmt against kr/logfmt. +func FuzzVsKR(data []byte) int { + parsed, err := parse(data) + parsedKR, errKR := parseKR(data) + + // github.com/go-logfmt/logfmt is a stricter parser. It returns errors for + // more inputs than github.com/kr/logfmt. Ignore any inputs that have a + // strict error. + if err != nil { + return 0 + } + + // Fail if the more forgiving parser finds an error not found by the + // stricter parser.
+ if errKR != nil { + panic(fmt.Sprintf("unmatched error: %v", errKR)) + } + + if !reflect.DeepEqual(parsed, parsedKR) { + panic(fmt.Sprintf("parsers disagree:\n%+v\n%+v\n", parsed, parsedKR)) + } + return 1 +} + +type kv struct { + k, v []byte +} + +func parse(data []byte) ([][]kv, error) { + var got [][]kv + dec := NewDecoder(bytes.NewReader(data)) + for dec.ScanRecord() { + var kvs []kv + for dec.ScanKeyval() { + kvs = append(kvs, kv{dec.Key(), dec.Value()}) + } + got = append(got, kvs) + } + return got, dec.Err() +} + +func parseKR(data []byte) ([][]kv, error) { + var ( + s = bufio.NewScanner(bytes.NewReader(data)) + err error + h saveHandler + got [][]kv + ) + for err == nil && s.Scan() { + h.kvs = nil + err = kr.Unmarshal(s.Bytes(), &h) + got = append(got, h.kvs) + } + if err == nil { + err = s.Err() + } + return got, err +} + +type saveHandler struct { + kvs []kv +} + +func (h *saveHandler) HandleLogfmt(key, val []byte) error { + if len(key) == 0 { + key = nil + } + if len(val) == 0 { + val = nil + } + h.kvs = append(h.kvs, kv{key, val}) + return nil +} + +func write(recs [][]kv, w io.Writer) error { + enc := NewEncoder(w) + for _, rec := range recs { + for _, f := range rec { + if err := enc.EncodeKeyval(f.k, f.v); err != nil { + return err + } + } + if err := enc.EndRecord(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/go-logfmt/logfmt/jsonstring.go b/vendor/github.com/go-logfmt/logfmt/jsonstring.go new file mode 100644 index 0000000..030ac85 --- /dev/null +++ b/vendor/github.com/go-logfmt/logfmt/jsonstring.go @@ -0,0 +1,277 @@ +package logfmt + +import ( + "bytes" + "io" + "strconv" + "sync" + "unicode" + "unicode/utf16" + "unicode/utf8" +) + +// Taken from Go's encoding/json and modified for use here. + +// Copyright 2010 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +var hex = "0123456789abcdef" + +var bufferPool = sync.Pool{ + New: func() interface{} { + return &bytes.Buffer{} + }, +} + +func getBuffer() *bytes.Buffer { + return bufferPool.Get().(*bytes.Buffer) +} + +func poolBuffer(buf *bytes.Buffer) { + buf.Reset() + bufferPool.Put(buf) +} + +// NOTE: keep in sync with writeQuotedBytes below. +func writeQuotedString(w io.Writer, s string) (int, error) { + buf := getBuffer() + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' { + i++ + continue + } + if start < i { + buf.WriteString(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + case '\t': + buf.WriteByte('\\') + buf.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \n, \r, and \t. + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRuneInString(s[i:]) + if c == utf8.RuneError { + if start < i { + buf.WriteString(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.WriteString(s[start:]) + } + buf.WriteByte('"') + n, err := w.Write(buf.Bytes()) + poolBuffer(buf) + return n, err +} + +// NOTE: keep in sync with writeQuotedString above.
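+//
+// Editor's hedged note: the escaping here mirrors JSON string encoding, so
+// quotes, backslashes, and control characters are escaped and invalid UTF-8
+// is replaced with \ufffd. For example, the input say "hi" followed by a
+// newline is written as:
+//
+//	"say \"hi\"\n"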
+func writeQuotedBytes(w io.Writer, s []byte) (int, error) { + buf := getBuffer() + buf.WriteByte('"') + start := 0 + for i := 0; i < len(s); { + if b := s[i]; b < utf8.RuneSelf { + if 0x20 <= b && b != '\\' && b != '"' { + i++ + continue + } + if start < i { + buf.Write(s[start:i]) + } + switch b { + case '\\', '"': + buf.WriteByte('\\') + buf.WriteByte(b) + case '\n': + buf.WriteByte('\\') + buf.WriteByte('n') + case '\r': + buf.WriteByte('\\') + buf.WriteByte('r') + case '\t': + buf.WriteByte('\\') + buf.WriteByte('t') + default: + // This encodes bytes < 0x20 except for \n, \r, and \t. + buf.WriteString(`\u00`) + buf.WriteByte(hex[b>>4]) + buf.WriteByte(hex[b&0xF]) + } + i++ + start = i + continue + } + c, size := utf8.DecodeRune(s[i:]) + if c == utf8.RuneError { + if start < i { + buf.Write(s[start:i]) + } + buf.WriteString(`\ufffd`) + i += size + start = i + continue + } + i += size + } + if start < len(s) { + buf.Write(s[start:]) + } + buf.WriteByte('"') + n, err := w.Write(buf.Bytes()) + poolBuffer(buf) + return n, err +} + +// getu4 decodes \uXXXX from the beginning of s, returning the hex value, +// or it returns -1. +func getu4(s []byte) rune { + if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { + return -1 + } + r, err := strconv.ParseUint(string(s[2:6]), 16, 64) + if err != nil { + return -1 + } + return rune(r) +} + +func unquoteBytes(s []byte) (t []byte, ok bool) { + if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { + return + } + s = s[1 : len(s)-1] + + // Check for unusual characters. If there are none, + // then no unquoting is needed, so return a slice of the + // original bytes. + r := 0 + for r < len(s) { + c := s[r] + if c == '\\' || c == '"' || c < ' ' { + break + } + if c < utf8.RuneSelf { + r++ + continue + } + rr, size := utf8.DecodeRune(s[r:]) + if rr == utf8.RuneError { + break + } + r += size + } + if r == len(s) { + return s, true + } + + b := make([]byte, len(s)+2*utf8.UTFMax) + w := copy(b, s[0:r]) + for r < len(s) { + // Out of room? Can only happen if s is full of + // malformed UTF-8 and we're replacing each + // byte with RuneError. + if w >= len(b)-2*utf8.UTFMax { + nb := make([]byte, (len(b)+utf8.UTFMax)*2) + copy(nb, b[0:w]) + b = nb + } + switch c := s[r]; { + case c == '\\': + r++ + if r >= len(s) { + return + } + switch s[r] { + default: + return + case '"', '\\', '/', '\'': + b[w] = s[r] + r++ + w++ + case 'b': + b[w] = '\b' + r++ + w++ + case 'f': + b[w] = '\f' + r++ + w++ + case 'n': + b[w] = '\n' + r++ + w++ + case 'r': + b[w] = '\r' + r++ + w++ + case 't': + b[w] = '\t' + r++ + w++ + case 'u': + r-- + rr := getu4(s[r:]) + if rr < 0 { + return + } + r += 6 + if utf16.IsSurrogate(rr) { + rr1 := getu4(s[r:]) + if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { + // A valid pair; consume. + r += 6 + w += utf8.EncodeRune(b[w:], dec) + break + } + // Invalid surrogate; fall back to replacement rune. + rr = unicode.ReplacementChar + } + w += utf8.EncodeRune(b[w:], rr) + } + + // Quote, control characters are invalid. + case c == '"', c < ' ': + return + + // ASCII + case c < utf8.RuneSelf: + b[w] = c + r++ + w++ + + // Coerce to well-formed UTF-8. 
+ default: + rr, size := utf8.DecodeRune(s[r:]) + r += size + w += utf8.EncodeRune(b[w:], rr) + } + } + return b[0:w], true +} diff --git a/vendor/github.com/gogo/protobuf/proto/clone.go b/vendor/github.com/gogo/protobuf/proto/clone.go new file mode 100644 index 0000000..5d4cba4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/clone.go @@ -0,0 +1,234 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(pb Message) Message { + in := reflect.ValueOf(pb) + if in.IsNil() { + return pb + } + + out := reflect.New(in.Type().Elem()) + // out is empty so a merge is a deep copy. + mergeStruct(out.Elem(), in.Elem()) + return out.Interface().(Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
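+//
+// A hedged sketch of typical use (editor's illustration; *pb.Example, its
+// Name and Tags fields, and the values are hypothetical, not part of this
+// package):
+//
+//	dst := &pb.Example{Name: proto.String("a"), Tags: []string{"x"}}
+//	src := &pb.Example{Tags: []string{"y"}}
+//	proto.Merge(dst, src)
+//	// dst.Name is still "a" because src leaves it unset; dst.Tags is now
+//	// []string{"x", "y"} because repeated fields are appended.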
+func Merge(dst, src Message) { + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + // Explicit test prior to mergeStruct so that mistyped nils will fail + panic("proto: type mismatch") + } + if in.IsNil() { + // Merging nil into non-nil is a quiet no-op + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, ok := in.Addr().Interface().(extensionsBytes); ok { + emOut := out.Addr().Interface().(extensionsBytes) + bIn := emIn.GetExtensions() + bOut := emOut.GetExtensions() + *bOut = append(*bOut, *bIn...) + } else if emIn, ok := extendable(in.Addr().Interface()); ok { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). +func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. 
+ + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. + if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go new file mode 100644 index 0000000..737f273 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -0,0 +1,978 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. 
+ */ + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. +var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// The fundamental decoders that interpret bytes on the wire. +// Those that take integer types all return uint64 and are +// therefore of type valueDecoder. + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. 
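+//
+// Hedged illustration (editor's note): fixed64 is simply eight bytes in
+// little-endian order, so
+//
+//	b := NewBuffer([]byte{0x2A, 0, 0, 0, 0, 0, 0, 0})
+//	x, err := b.DecodeFixed64() // x == 42, err == nil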
+func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// These are not ValueDecoders: they produce an array of bytes or a string. +// bytes, embedded messages + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. +// If the protocol buffer has extensions, and the field matches, add it as an extension. +// Otherwise, if the XXX_unrecognized field exists, append the skipped data there. +func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error { + oi := o.index + + err := o.skip(t, tag, wire) + if err != nil { + return err + } + + if !unrecField.IsValid() { + return nil + } + + ptr := structPointer_Bytes(base, unrecField) + + // Add the skipped field to struct field + obuf := o.buf + + o.buf = *ptr + o.EncodeVarint(uint64(tag<<3 | wire)) + *ptr = append(o.buf, obuf[oi:o.index]...) + + o.buf = obuf + + return nil +} + +// Skip the next item in the buffer. Its wire type is decoded and presented as an argument. 
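+//
+// For orientation (editor's hedged note): each key on the wire is a varint
+// holding (field_number << 3) | wire_type, which is how callers derive the
+// tag and wire arguments, e.g.
+//
+//	u, _ := o.DecodeVarint() // suppose u == 0x1A
+//	wire := int(u & 0x7)     // 2 (WireBytes)
+//	tag := int(u >> 3)       // 3 (field number 3)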
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error { + + var u uint64 + var err error + + switch wire { + case WireVarint: + _, err = o.DecodeVarint() + case WireFixed64: + _, err = o.DecodeFixed64() + case WireBytes: + _, err = o.DecodeRawBytes(false) + case WireFixed32: + _, err = o.DecodeFixed32() + case WireStartGroup: + for { + u, err = o.DecodeVarint() + if err != nil { + break + } + fwire := int(u & 0x7) + if fwire == WireEndGroup { + break + } + ftag := int(u >> 3) + err = o.skip(t, ftag, fwire) + if err != nil { + break + } + } + default: + err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t) + } + return err +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The method should reset the receiver before +// decoding starts. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + return UnmarshalMerge(buf, pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. +func UnmarshalMerge(buf []byte, pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +func (p *Buffer) DecodeGroup(pb Message) error { + typ, base, err := getbase(pb) + if err != nil { + return err + } + return p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), true, base) +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(Unmarshaler); ok { + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + typ, base, err := getbase(pb) + if err != nil { + return err + } + + err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base) + + if collectStats { + stats.Decode++ + } + + return err +} + +// unmarshalType does the work of unmarshaling a structure. 
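+// (Editor's summary, added for orientation: the loop below repeatedly reads
+// a key varint, splits it into tag and wire type, looks the tag up in
+// prop.decoderTags, and falls back to extension, oneof, or skip-and-save
+// handling when the tag is unknown.)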
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error { + var state errorState + required, reqFields := prop.reqCount, uint64(0) + + var err error + for err == nil && o.index < len(o.buf) { + oi := o.index + var u uint64 + u, err = o.DecodeVarint() + if err != nil { + break + } + wire := int(u & 0x7) + if wire == WireEndGroup { + if is_group { + if required > 0 { + // Not enough information to determine the exact field. + // (See below.) + return &RequiredNotSetError{"{Unknown}"} + } + return nil // input is satisfied + } + return fmt.Errorf("proto: %s: wiretype end group for non-group", st) + } + tag := int(u >> 3) + if tag <= 0 { + return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire) + } + fieldnum, ok := prop.decoderTags.get(tag) + if !ok { + // Maybe it's an extension? + if prop.extendable { + if e, eok := structPointer_Interface(base, st).(extensionsBytes); eok { + if isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + ext := e.GetExtensions() + *ext = append(*ext, o.buf[oi:o.index]...) + } + continue + } + } else if e, _ := extendable(structPointer_Interface(base, st)); isExtensionField(e, int32(tag)) { + if err = o.skip(st, tag, wire); err == nil { + extmap := e.extensionsWrite() + ext := extmap[int32(tag)] // may be missing + ext.enc = append(ext.enc, o.buf[oi:o.index]...) + extmap[int32(tag)] = ext + } + continue + } + } + // Maybe it's a oneof? + if prop.oneofUnmarshaler != nil { + m := structPointer_Interface(base, st).(Message) + // First return value indicates whether tag is a oneof field. + ok, err = prop.oneofUnmarshaler(m, tag, wire, o) + if err == ErrInternalBadWireType { + // Map the error to something more descriptive. + // Do the formatting here to save generated code space. + err = fmt.Errorf("bad wiretype for oneof field in %T", m) + } + if ok { + continue + } + } + err = o.skipAndSave(st, tag, wire, base, prop.unrecField) + continue + } + p := prop.Prop[fieldnum] + + if p.dec == nil { + fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name) + continue + } + dec := p.dec + if wire != WireStartGroup && wire != p.WireType { + if wire == WireBytes && p.packedDec != nil { + // a packable field + dec = p.packedDec + } else { + err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType) + continue + } + } + decErr := dec(o, p, base) + if decErr != nil && !state.shouldContinue(decErr, p) { + err = decErr + } + if err == nil && p.Required { + // Successfully decoded a required field. + if tag <= 64 { + // use bitmap for fields 1-64 to catch field reuse. + var mask uint64 = 1 << uint64(tag-1) + if reqFields&mask == 0 { + // new required field + reqFields |= mask + required-- + } + } else { + // This is imprecise. It can be fooled by a required field + // with a tag > 64 that is encoded twice; that's very rare. + // A fully correct implementation would require allocating + // a data structure, which we would like to avoid. + required-- + } + } + } + if err == nil { + if is_group { + return io.ErrUnexpectedEOF + } + if state.err != nil { + return state.err + } + if required > 0 { + // Not enough information to determine the exact field. If we use extra + // CPU, we could determine the field only if the missing required field + // has a tag <= 64 and we check reqFields. 
+ return &RequiredNotSetError{"{Unknown}"} + } + } + return err +} + +// Individual type decoders +// For each, +// u is the decoded value, +// v is a pointer to the field (pointer) in the struct + +// Sizes of the pools to allocate inside the Buffer. +// The goal is modest amortization and allocation +// on at least 16-byte boundaries. +const ( + boolPoolSize = 16 + uint32PoolSize = 8 + uint64PoolSize = 4 +) + +// Decode a bool. +func (o *Buffer) dec_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + if len(o.bools) == 0 { + o.bools = make([]bool, boolPoolSize) + } + o.bools[0] = u != 0 + *structPointer_Bool(base, p.field) = &o.bools[0] + o.bools = o.bools[1:] + return nil +} + +func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + *structPointer_BoolVal(base, p.field) = u != 0 + return nil +} + +// Decode an int32. +func (o *Buffer) dec_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32_Set(structPointer_Word32(base, p.field), o, uint32(u)) + return nil +} + +func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u)) + return nil +} + +// Decode an int64. +func (o *Buffer) dec_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, u) + return nil +} + +func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, u) + return nil +} + +// Decode a string. +func (o *Buffer) dec_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_String(base, p.field) = &s + return nil +} + +func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + *structPointer_StringVal(base, p.field) = s + return nil +} + +// Decode a slice of bytes ([]byte). +func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + *structPointer_Bytes(base, p.field) = b + return nil +} + +// Decode a slice of bools ([]bool). +func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + v := structPointer_BoolSlice(base, p.field) + *v = append(*v, u != 0) + return nil +} + +// Decode a slice of bools ([]bool) in packed format. +func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error { + v := structPointer_BoolSlice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded bools + fin := o.index + nb + if fin < o.index { + return errOverflow + } + + y := *v + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + y = append(y, u != 0) + } + + *v = y + return nil +} + +// Decode a slice of int32s ([]int32). 
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + structPointer_Word32Slice(base, p.field).Append(uint32(u)) + return nil +} + +// Decode a slice of int32s ([]int32) in packed format. +func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int32s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(uint32(u)) + } + return nil +} + +// Decode a slice of int64s ([]int64). +func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error { + u, err := p.valDec(o) + if err != nil { + return err + } + + structPointer_Word64Slice(base, p.field).Append(u) + return nil +} + +// Decode a slice of int64s ([]int64) in packed format. +func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Slice(base, p.field) + + nn, err := o.DecodeVarint() + if err != nil { + return err + } + nb := int(nn) // number of bytes of encoded int64s + + fin := o.index + nb + if fin < o.index { + return errOverflow + } + for o.index < fin { + u, err := p.valDec(o) + if err != nil { + return err + } + v.Append(u) + } + return nil +} + +// Decode a slice of strings ([]string). +func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error { + s, err := o.DecodeStringBytes() + if err != nil { + return err + } + v := structPointer_StringSlice(base, p.field) + *v = append(*v, s) + return nil +} + +// Decode a slice of slice of bytes ([][]byte). +func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + v := structPointer_BytesSlice(base, p.field) + *v = append(*v, b) + return nil +} + +// Decode a map field. +func (o *Buffer) dec_new_map(p *Properties, base structPointer) error { + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + oi := o.index // index at the end of this map entry + o.index -= len(raw) // move buffer back to start of map entry + + mptr := structPointer_NewAt(base, p.field, p.mtype) // *map[K]V + if mptr.Elem().IsNil() { + mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem())) + } + v := mptr.Elem() // map[K]V + + // Prepare addressable doubly-indirect placeholders for the key and value types. + // See enc_new_map for why. + keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K + keybase := toStructPointer(keyptr.Addr()) // **K + + var valbase structPointer + var valptr reflect.Value + switch p.mtype.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valptr = reflect.ValueOf(&dummy) // *[]byte + valbase = toStructPointer(valptr) // *[]byte + case reflect.Ptr: + // message; valptr is **Msg; need to allocate the intermediate pointer + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valptr.Set(reflect.New(valptr.Type().Elem())) + valbase = toStructPointer(valptr) + default: + // everything else + valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V + valbase = toStructPointer(valptr.Addr()) // **V + } + + // Decode. + // This parses a restricted wire format, namely the encoding of a message + // with two fields. See enc_new_map for the format. 
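+ //
+ // Hedged illustration (editor's note): for a map[string]string entry
+ // {"k": "v"}, the entry body decoded by this loop is
+ //
+ //	0x0a 0x01 'k'  // field 1 (key),   wire type 2 (bytes), length 1
+ //	0x12 0x01 'v'  // field 2 (value), wire type 2 (bytes), length 1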
+ for o.index < oi { + // tagcode for key and value properties are always a single byte + // because they have tags 1 and 2. + tagcode := o.buf[o.index] + o.index++ + switch tagcode { + case p.mkeyprop.tagcode[0]: + if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil { + return err + } + case p.mvalprop.tagcode[0]: + if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil { + return err + } + default: + // TODO: Should we silently skip this instead? + return fmt.Errorf("proto: bad map data tag %d", raw[0]) + } + } + keyelem, valelem := keyptr.Elem(), valptr.Elem() + if !keyelem.IsValid() { + keyelem = reflect.Zero(p.mtype.Key()) + } + if !valelem.IsValid() { + valelem = reflect.Zero(p.mtype.Elem()) + } + + v.SetMapIndex(keyelem, valelem) + return nil +} + +// Decode a group. +func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error { + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + return o.unmarshalType(p.stype, p.sprop, true, bas) +} + +// Decode an embedded message. +func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + bas := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(bas) { + // allocate new nested message + bas = toStructPointer(reflect.New(p.stype)) + structPointer_SetStructPointer(base, p.field, bas) + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := structPointer_Interface(bas, p.stype) + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of embedded messages. +func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, false, base) +} + +// Decode a slice of embedded groups. +func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error { + return o.dec_slice_struct(p, true, base) +} + +// Decode a slice of structs ([]*struct). +func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error { + v := reflect.New(p.stype) + bas := toStructPointer(v) + structPointer_StructPointerSlice(base, p.field).Append(bas) + + if is_group { + err := o.unmarshalType(p.stype, p.sprop, is_group, bas) + return err + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + iv := v.Interface() + return iv.(Unmarshaler).Unmarshal(raw) + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, bas) + + o.buf = obuf + o.index = oi + + return err +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go new file mode 100644 index 0000000..6fb74de --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -0,0 +1,172 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +// Decode a reference to a struct pointer. +func (o *Buffer) dec_ref_struct_message(p *Properties, base structPointer) (err error) { + raw, e := o.DecodeRawBytes(false) + if e != nil { + return e + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is a pointer receiver") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + bas := structPointer_FieldPointer(base, p.field) + + err = o.unmarshalType(p.stype, p.sprop, false, bas) + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers ([]struct). +func (o *Buffer) dec_slice_ref_struct(p *Properties, is_group bool, base structPointer) error { + newBas := appendStructPointer(base, p.field, p.sstype) + + if is_group { + panic("not supported, maybe in future, if requested.") + } + + raw, err := o.DecodeRawBytes(false) + if err != nil { + return err + } + + // If the object can unmarshal itself, let it. + if p.isUnmarshaler { + panic("not supported, since this is not a pointer receiver.") + } + + obuf := o.buf + oi := o.index + o.buf = raw + o.index = 0 + + err = o.unmarshalType(p.stype, p.sprop, is_group, newBas) + + o.buf = obuf + o.index = oi + + return err +} + +// Decode a slice of references to struct pointers. 
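+//
+// (Editor's hedged note: the "ref" variants in this file appear to exist for
+// gogoproto fields marked nullable=false, which are generated as values
+// rather than pointers and therefore need copy semantics instead of pointer
+// assignment.)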
+func (o *Buffer) dec_slice_ref_struct_message(p *Properties, base structPointer) error { + return o.dec_slice_ref_struct(p, false, base) +} + +func setPtrCustomType(base structPointer, f field, v interface{}) { + if v == nil { + return + } + structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) +} + +func setCustomType(base structPointer, f field, value interface{}) { + if value == nil { + return + } + v := reflect.ValueOf(value).Elem() + t := reflect.TypeOf(value).Elem() + kind := t.Kind() + switch kind { + case reflect.Slice: + slice := reflect.MakeSlice(t, v.Len(), v.Cap()) + reflect.Copy(slice, v) + oldHeader := structPointer_GetSliceHeader(base, f) + oldHeader.Data = slice.Pointer() + oldHeader.Len = v.Len() + oldHeader.Cap = v.Cap() + default: + size := reflect.TypeOf(value).Elem().Size() + structPointer_Copy(toStructPointer(reflect.ValueOf(value)), structPointer_Add(base, f), int(size)) + } +} + +func (o *Buffer) dec_custom_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + setPtrCustomType(base, p.field, custom) + return nil +} + +func (o *Buffer) dec_custom_ref_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + if custom != nil { + setCustomType(base, p.field, custom) + } + return nil +} + +// Decode a slice of bytes ([]byte) into a slice of custom types. +func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error { + b, err := o.DecodeRawBytes(true) + if err != nil { + return err + } + i := reflect.New(p.ctype.Elem()).Interface() + custom := (i).(Unmarshaler) + if err := custom.Unmarshal(b); err != nil { + return err + } + newBas := appendStructPointer(base, p.field, p.ctype) + + var zero field + setCustomType(newBas, zero, custom) + + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration.go b/vendor/github.com/gogo/protobuf/proto/duration.go new file mode 100644 index 0000000..93464c9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290 years). +func validateDuration(d *duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// durationFromProto converts a Duration to a time.Duration. durationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// durationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 0000000..18e2a5f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,203 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} + +func (o *Buffer) decDuration() (time.Duration, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return 0, err + } + dproto := &duration{} + if err := Unmarshal(b, dproto); err != nil { + return 0, err + } + return durationFromProto(dproto) +} + +func (o *Buffer) dec_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) + var zero field + setPtrCustomType(newBas, zero, &d) + return nil +} + +func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + structPointer_Word64Slice(base, p.field).Append(uint64(d)) + return nil +} + +func size_duration(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_duration(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, 
p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_duration(p *Properties, base structPointer) (n int) { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_slice_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return 0 + } + dproto := durationProto(*durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return errRepeatedHasNil + } + dproto := durationProto(*durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go new file mode 100644 index 0000000..8b84d1b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -0,0 +1,1362 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "fmt" + "reflect" + "sort" +) + +// RequiredNotSetError is the error returned if Marshal is called with +// a protocol buffer struct whose required fields have not +// all been initialized. It is also the error returned if Unmarshal is +// called with an encoded protocol buffer that does not include all the +// required fields. +// +// When printed, RequiredNotSetError reports the first unset required field in a +// message. If the field cannot be precisely determined, it is reported as +// "{Unknown}". +type RequiredNotSetError struct { + field string +} + +func (e *RequiredNotSetError) Error() string { + return fmt.Sprintf("proto: required field %q not set", e.field) +} + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. + ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// maxMarshalSize is the largest allowed size of an encoded protobuf, +// since C++ and Java use signed int32s for the size. +const maxMarshalSize = 1<<31 - 1 + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
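+//
+// For example, EncodeVarint(300) appends the bytes 0xAC 0x02: 300 is split
+// into 7-bit groups starting from the least significant bits (0101100,
+// 0000010), and every byte except the last has its high bit set as a
+// continuation marker.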
+func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + return sizeVarint(x) +} + +func sizeVarint(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +func sizeFixed64(x uint64) int { + return 8 +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +func sizeFixed32(x uint64) int { + return 4 +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +func sizeZigzag64(x uint64) int { + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +func sizeZigzag32(x uint64) int { + return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +func sizeRawBytes(b []byte) int { + return sizeVarint(uint64(len(b))) + + len(b) +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +func sizeStringBytes(s string) int { + return sizeVarint(uint64(len(s))) + + len(s) +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, returning the data. +func Marshal(pb Message) ([]byte, error) { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + return m.Marshal() + } + p := NewBuffer(nil) + err := p.Marshal(pb) + if p.buf == nil && err == nil { + // Return a non-nil slice on success. + return []byte{}, nil + } + return p.buf, err +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. 
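+//
+// This is the same length-delimited framing used for embedded messages on
+// the wire, so a message written this way can be read back with the
+// matching length-delimited decoder, Buffer.DecodeMessage.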
+func (p *Buffer) EncodeMessage(pb Message) error { + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + var state errorState + err = p.enc_len_struct(GetProperties(t.Elem()), base, &state) + } + return err +} + +// Marshal takes the protocol buffer +// and encodes it into the wire format, writing the result to the +// Buffer. +func (p *Buffer) Marshal(pb Message) error { + // Can the object marshal itself? + if m, ok := pb.(Marshaler); ok { + data, err := m.Marshal() + p.buf = append(p.buf, data...) + return err + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return ErrNil + } + if err == nil { + err = p.enc_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Encode++ // Parens are to work around a goimports bug. + } + + if len(p.buf) > maxMarshalSize { + return ErrTooLarge + } + return err +} + +// Size returns the encoded size of a protocol buffer. +func Size(pb Message) (n int) { + // Can the object marshal itself? If so, Size is slow. + // TODO: add Size to Marshaler, or add a Sizer interface. + if m, ok := pb.(Marshaler); ok { + b, _ := m.Marshal() + return len(b) + } + + t, base, err := getbase(pb) + if structPointer_IsNil(base) { + return 0 + } + if err == nil { + n = size_struct(GetProperties(t.Elem()), base) + } + + if collectStats { + (stats).Size++ // Parens are to work around a goimports bug. + } + + return +} + +// Individual type encoders. + +// Encode a bool. +func (o *Buffer) enc_bool(p *Properties, base structPointer) error { + v := *structPointer_Bool(base, p.field) + if v == nil { + return ErrNil + } + x := 0 + if *v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + if !v { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, 1) + return nil +} + +func size_bool(p *Properties, base structPointer) int { + v := *structPointer_Bool(base, p.field) + if v == nil { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +func size_proto3_bool(p *Properties, base structPointer) int { + v := *structPointer_BoolVal(base, p.field) + if !v && !p.oneof { + return 0 + } + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode an int32. +func (o *Buffer) enc_int32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
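+	// valEnc was chosen while the struct tag was parsed: a plain varint
+	// encoder here, or the zigzag/fixed variants for sint32/sfixed32 fields.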
+ p.valEnc(o, uint64(x)) + return nil +} + +func size_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a uint32. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return ErrNil + } + x := word32_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32(base, p.field) + if word32_IsNil(v) { + return 0 + } + x := word32_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func size_proto3_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode an int64. +func (o *Buffer) enc_int64(p *Properties, base structPointer) error { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return ErrNil + } + x := word64_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64(base, p.field) + if word64_IsNil(v) { + return 0 + } + x := word64_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +func size_proto3_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + if x == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a string. +func (o *Buffer) enc_string(p *Properties, base structPointer) error { + v := *structPointer_String(base, p.field) + if v == nil { + return ErrNil + } + x := *v + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(x) + return nil +} + +func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + if v == "" { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeStringBytes(v) + return nil +} + +func size_string(p *Properties, base structPointer) (n int) { + v := *structPointer_String(base, p.field) + if v == nil { + return 0 + } + x := *v + n += len(p.tagcode) + n += sizeStringBytes(x) + return +} + +func size_proto3_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + if v == "" && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// All protocol buffer fields are nillable, but be careful. +func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} + +// Encode a message struct. +func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return state.err + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +func size_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a group struct. +func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error { + var state errorState + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return ErrNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + err := o.enc_struct(p.sprop, b) + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return state.err +} + +func size_struct_group(p *Properties, base structPointer) (n int) { + b := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(b) { + return 0 + } + + n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup)) + n += size_struct(p.sprop, b) + n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup)) + return +} + +// Encode a slice of bools ([]bool). +func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + for _, x := range s { + o.buf = append(o.buf, p.tagcode...) + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_bool(p *Properties, base structPointer) int { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + return l * (len(p.tagcode) + 1) // each bool takes exactly one byte +} + +// Encode a slice of bools ([]bool) in packed format. +func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
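+	// Packed encoding: a single tag for the whole field, then the byte
+	// length of the payload, then the raw values.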
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte + for _, x := range s { + v := uint64(0) + if x { + v = 1 + } + p.valEnc(o, v) + } + return nil +} + +func size_slice_packed_bool(p *Properties, base structPointer) (n int) { + s := *structPointer_BoolSlice(base, p.field) + l := len(s) + if l == 0 { + return 0 + } + n += len(p.tagcode) + n += sizeVarint(uint64(l)) + n += l // each bool takes exactly one byte + return +} + +// Encode a slice of bytes ([]byte). +func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(s) + return nil +} + +func size_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +func size_proto3_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if len(s) == 0 && !p.oneof { + return 0 + } + n += len(p.tagcode) + n += sizeRawBytes(s) + return +} + +// Encode a slice of int32s ([]int32). +func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of int32s ([]int32) in packed format. +func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + p.valEnc(buf, uint64(x)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + x := int32(s.Index(i)) // permit sign extension to use full 64-bit range + bufSize += p.valSize(uint64(x)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of uint32s ([]uint32). +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
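+		// Unpacked: the tag just written is repeated before every element.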
+ x := s.Index(i) + p.valEnc(o, uint64(x)) + } + return nil +} + +func size_slice_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + x := s.Index(i) + n += p.valSize(uint64(x)) + } + return +} + +// Encode a slice of uint32s ([]uint32) in packed format. +// Exactly the same as int32, except for no sign extension. +func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, uint64(s.Index(i))) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_uint32(p *Properties, base structPointer) (n int) { + s := structPointer_Word32Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(uint64(s.Index(i))) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of int64s ([]int64). +func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, s.Index(i)) + } + return nil +} + +func size_slice_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + for i := 0; i < l; i++ { + n += len(p.tagcode) + n += p.valSize(s.Index(i)) + } + return +} + +// Encode a slice of int64s ([]int64) in packed format. +func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return ErrNil + } + // TODO: Reuse a Buffer. + buf := NewBuffer(nil) + for i := 0; i < l; i++ { + p.valEnc(buf, s.Index(i)) + } + + o.buf = append(o.buf, p.tagcode...) + o.EncodeVarint(uint64(len(buf.buf))) + o.buf = append(o.buf, buf.buf...) + return nil +} + +func size_slice_packed_int64(p *Properties, base structPointer) (n int) { + s := structPointer_Word64Slice(base, p.field) + l := s.Len() + if l == 0 { + return 0 + } + var bufSize int + for i := 0; i < l; i++ { + bufSize += p.valSize(s.Index(i)) + } + + n += len(p.tagcode) + n += sizeVarint(uint64(bufSize)) + n += bufSize + return +} + +// Encode a slice of slice of bytes ([][]byte). +func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return ErrNil + } + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(ss[i]) + } + return nil +} + +func size_slice_slice_byte(p *Properties, base structPointer) (n int) { + ss := *structPointer_BytesSlice(base, p.field) + l := len(ss) + if l == 0 { + return 0 + } + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeRawBytes(ss[i]) + } + return +} + +// Encode a slice of strings ([]string). +func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + for i := 0; i < l; i++ { + o.buf = append(o.buf, p.tagcode...) 
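+		// Strings are length-delimited and so can never be packed; each
+		// element is tagged and framed on its own.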
+ o.EncodeStringBytes(ss[i]) + } + return nil +} + +func size_slice_string(p *Properties, base structPointer) (n int) { + ss := *structPointer_StringSlice(base, p.field) + l := len(ss) + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + n += sizeStringBytes(ss[i]) + } + return +} + +// Encode a slice of message structs ([]*struct). +func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + } + return state.err +} + +func size_slice_struct_message(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := s.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +// Encode a slice of group structs ([]*struct). +func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error { + var state errorState + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return errRepeatedHasNil + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup)) + + err := o.enc_struct(p.sprop, b) + + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup)) + } + return state.err +} + +func size_slice_struct_group(p *Properties, base structPointer) (n int) { + s := structPointer_StructPointerSlice(base, p.field) + l := s.Len() + + n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup)) + n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup)) + for i := 0; i < l; i++ { + b := s.Index(i) + if structPointer_IsNil(b) { + return // return size up to this point + } + + n += size_struct(p.sprop, b) + } + return +} + +// Encode an extension map. 
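+// Extension values are kept pre-encoded in Extension.enc (filled in by
+// encodeExtensionsMap), so writing them out is mostly a byte copy; keys are
+// sorted in enc_map_body so the output is deterministic.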
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+	exts := structPointer_ExtMap(base, p.field)
+	if err := encodeExtensionsMap(*exts); err != nil {
+		return err
+	}
+
+	return o.enc_map_body(*exts)
+}
+
+func (o *Buffer) enc_exts(p *Properties, base structPointer) error {
+	exts := structPointer_Extensions(base, p.field)
+
+	v, mu := exts.extensionsRead()
+	if v == nil {
+		return nil
+	}
+
+	mu.Lock()
+	defer mu.Unlock()
+	if err := encodeExtensionsMap(v); err != nil {
+		return err
+	}
+
+	return o.enc_map_body(v)
+}
+
+func (o *Buffer) enc_map_body(v map[int32]Extension) error {
+	// Fast-path for common cases: zero or one extensions.
+	if len(v) <= 1 {
+		for _, e := range v {
+			o.buf = append(o.buf, e.enc...)
+		}
+		return nil
+	}
+
+	// Sort keys to provide a deterministic encoding.
+	keys := make([]int, 0, len(v))
+	for k := range v {
+		keys = append(keys, int(k))
+	}
+	sort.Ints(keys)
+
+	for _, k := range keys {
+		o.buf = append(o.buf, v[int32(k)].enc...)
+	}
+	return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+	v := structPointer_ExtMap(base, p.field)
+	return extensionsMapSize(*v)
+}
+
+func size_exts(p *Properties, base structPointer) int {
+	v := structPointer_Extensions(base, p.field)
+	return extensionsSize(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+	var state errorState // XXX: or do we need to plumb this through?
+
+	/*
+		A map defined as
+			map<key_type, value_type> map_field = N;
+		is encoded in the same way as
+			message MapFieldEntry {
+				key_type key = 1;
+				value_type value = 2;
+			}
+			repeated MapFieldEntry map_field = N;
+	*/
+
+	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+	if v.Len() == 0 {
+		return nil
+	}
+
+	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+	enc := func() error {
+		if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+			return err
+		}
+		if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil {
+			return err
+		}
+		return nil
+	}
+
+	// Don't sort map keys. It is not required by the spec, and C++ doesn't do it.
+	for _, key := range v.MapKeys() {
+		val := v.MapIndex(key)
+
+		keycopy.Set(key)
+		valcopy.Set(val)
+
+		o.buf = append(o.buf, p.tagcode...)
+		if err := o.enc_len_thing(enc, &state); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+	v := structPointer_NewAt(base, p.field, p.mtype).Elem() // map[K]V
+
+	keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+	n := 0
+	for _, key := range v.MapKeys() {
+		val := v.MapIndex(key)
+		keycopy.Set(key)
+		valcopy.Set(val)
+
+		// Tag codes for key and val are the responsibility of the sub-sizer.
+		keysize := p.mkeyprop.size(p.mkeyprop, keybase)
+		valsize := p.mvalprop.size(p.mvalprop, valbase)
+		entry := keysize + valsize
+		// Add on tag code and length of map entry itself.
+		n += len(p.tagcode) + sizeVarint(uint64(entry)) + entry
+	}
+	return n
+}
+
+// mapEncodeScratch returns a new reflect.Value matching the map's value type,
+// and a structPointer suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+	// Prepare addressable doubly-indirect placeholders for the key and value types.
+	// This is needed because the element-type encoders expect **T, but the map iteration produces T.
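+	// For example, for a map<int32, string> the value encoder is handed the
+	// address of a *string that points at an addressable copy of the
+	// element, i.e. a **string.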
+ + keycopy = reflect.New(mapType.Key()).Elem() // addressable K + keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K + keyptr.Set(keycopy.Addr()) // + keybase = toStructPointer(keyptr.Addr()) // **K + + // Value types are more varied and require special handling. + switch mapType.Elem().Kind() { + case reflect.Slice: + // []byte + var dummy []byte + valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte + valbase = toStructPointer(valcopy.Addr()) + case reflect.Ptr: + // message; the generated field type is map[K]*Msg (so V is *Msg), + // so we only need one level of indirection. + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valbase = toStructPointer(valcopy.Addr()) + default: + // everything else + valcopy = reflect.New(mapType.Elem()).Elem() // addressable V + valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V + valptr.Set(valcopy.Addr()) // + valbase = toStructPointer(valptr.Addr()) // **V + } + return +} + +// Encode a struct. +func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error { + var state errorState + // Encode fields in tag order so that decoders may use optimizations + // that depend on the ordering. + // https://developers.google.com/protocol-buffers/docs/encoding#order + for _, i := range prop.order { + p := prop.Prop[i] + if p.enc != nil { + err := p.enc(o, p, base) + if err != nil { + if err == ErrNil { + if p.Required && state.err == nil { + state.err = &RequiredNotSetError{p.Name} + } + } else if err == errRepeatedHasNil { + // Give more context to nil values in repeated fields. + return errors.New("repeated field " + p.OrigName + " has nil element") + } else if !state.shouldContinue(err, p) { + return err + } + } + if len(o.buf) > maxMarshalSize { + return ErrTooLarge + } + } + } + + // Do oneof fields. + if prop.oneofMarshaler != nil { + m := structPointer_Interface(base, prop.stype).(Message) + if err := prop.oneofMarshaler(m, o); err == ErrNil { + return errOneofHasNil + } else if err != nil { + return err + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + if len(o.buf)+len(v) > maxMarshalSize { + return ErrTooLarge + } + if len(v) > 0 { + o.buf = append(o.buf, v...) + } + } + + return state.err +} + +func size_struct(prop *StructProperties, base structPointer) (n int) { + for _, i := range prop.order { + p := prop.Prop[i] + if p.size != nil { + n += p.size(p, base) + } + } + + // Add unrecognized fields at the end. + if prop.unrecField.IsValid() { + v := *structPointer_Bytes(base, prop.unrecField) + n += len(v) + } + + // Factor in any oneof fields. + if prop.oneofSizer != nil { + m := structPointer_Interface(base, prop.stype).(Message) + n += prop.oneofSizer(m) + } + + return +} + +var zeroes [20]byte // longer than any conceivable sizeVarint + +// Encode a struct, preceded by its encoded length (as a varint). +func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error { + return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state) +} + +// Encode something, preceded by its encoded length (as a varint). 
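+//
+// Four bytes are reserved for the length up front (enough to frame messages
+// up to 256 MB); once the body is encoded, the buffer is shifted so the
+// actual varint length fits exactly. For example, a 5-byte body needs a
+// 1-byte length, so the body is moved 3 bytes left.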
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error { + iLen := len(o.buf) + o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length + iMsg := len(o.buf) + err := enc() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + lMsg := len(o.buf) - iMsg + lLen := sizeVarint(uint64(lMsg)) + switch x := lLen - (iMsg - iLen); { + case x > 0: // actual length is x bytes larger than the space we reserved + // Move msg x bytes right. + o.buf = append(o.buf, zeroes[:x]...) + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + case x < 0: // actual length is x bytes smaller than the space we reserved + // Move msg x bytes left. + copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg]) + o.buf = o.buf[:len(o.buf)+x] // x is negative + } + // Encode the length in the reserved space. + o.buf = o.buf[:iLen] + o.EncodeVarint(uint64(lMsg)) + o.buf = o.buf[:len(o.buf)+lMsg] + return state.err +} + +// errorState maintains the first error that occurs and updates that error +// with additional context. +type errorState struct { + err error +} + +// shouldContinue reports whether encoding should continue upon encountering the +// given error. If the error is RequiredNotSetError, shouldContinue returns true +// and, if this is the first appearance of that error, remembers it for future +// reporting. +// +// If prop is not nil, it may update any error with additional context about the +// field with the error. +func (s *errorState) shouldContinue(err error, prop *Properties) bool { + // Ignore unset required fields. + reqNotSet, ok := err.(*RequiredNotSetError) + if !ok { + return false + } + if s.err == nil { + if prop != nil { + err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} + } + s.err = err + } + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go new file mode 100644 index 0000000..32111b7 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -0,0 +1,350 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// http://github.com/golang/protobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" +) + +func NewRequiredNotSetError(field string) *RequiredNotSetError { + return &RequiredNotSetError{field} +} + +type Sizer interface { + Size() int +} + +func (o *Buffer) enc_ext_slice_byte(p *Properties, base structPointer) error { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return ErrNil + } + o.buf = append(o.buf, s...) + return nil +} + +func size_ext_slice_byte(p *Properties, base structPointer) (n int) { + s := *structPointer_Bytes(base, p.field) + if s == nil { + return 0 + } + n += len(s) + return +} + +// Encode a reference to bool pointer. +func (o *Buffer) enc_ref_bool(p *Properties, base structPointer) error { + v := *structPointer_BoolVal(base, p.field) + x := 0 + if v { + x = 1 + } + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_bool(p *Properties, base structPointer) int { + return len(p.tagcode) + 1 // each bool takes exactly one byte +} + +// Encode a reference to int32 pointer. +func (o *Buffer) enc_ref_int32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_int32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := int32(word32Val_Get(v)) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +func (o *Buffer) enc_ref_uint32(p *Properties, base structPointer) error { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, uint64(x)) + return nil +} + +func size_ref_uint32(p *Properties, base structPointer) (n int) { + v := structPointer_Word32Val(base, p.field) + x := word32Val_Get(v) + n += len(p.tagcode) + n += p.valSize(uint64(x)) + return +} + +// Encode a reference to an int64 pointer. +func (o *Buffer) enc_ref_int64(p *Properties, base structPointer) error { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + o.buf = append(o.buf, p.tagcode...) + p.valEnc(o, x) + return nil +} + +func size_ref_int64(p *Properties, base structPointer) (n int) { + v := structPointer_Word64Val(base, p.field) + x := word64Val_Get(v) + n += len(p.tagcode) + n += p.valSize(x) + return +} + +// Encode a reference to a string pointer. +func (o *Buffer) enc_ref_string(p *Properties, base structPointer) error { + v := *structPointer_StringVal(base, p.field) + o.buf = append(o.buf, p.tagcode...) + o.EncodeStringBytes(v) + return nil +} + +func size_ref_string(p *Properties, base structPointer) (n int) { + v := *structPointer_StringVal(base, p.field) + n += len(p.tagcode) + n += sizeStringBytes(v) + return +} + +// Encode a reference to a message struct. 
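+// "Reference" here is gogo/protobuf's (gogoproto.nullable) = false case,
+// where the generated struct holds the message by value instead of by
+// pointer, so the encoder takes the embedded struct's address.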
+func (o *Buffer) enc_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil + } + + o.buf = append(o.buf, p.tagcode...) + return o.enc_len_struct(p.sprop, structp, &state) +} + +//TODO this is only copied, please fix this +func size_ref_struct_message(p *Properties, base structPointer) int { + structp := structPointer_GetRefStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n0 := len(p.tagcode) + n1 := sizeRawBytes(data) + return n0 + n1 + } + + n0 := len(p.tagcode) + n1 := size_struct(p.sprop, structp) + n2 := sizeVarint(uint64(n1)) // size of encoded length + return n0 + n1 + n2 +} + +// Encode a slice of references to message struct pointers ([]struct). +func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { + var state errorState + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return errRepeatedHasNil + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, err := m.Marshal() + if err != nil && !state.shouldContinue(err, nil) { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + continue + } + + o.buf = append(o.buf, p.tagcode...) + err := o.enc_len_struct(p.sprop, structp, &state) + if err != nil && !state.shouldContinue(err, nil) { + if err == ErrNil { + return errRepeatedHasNil + } + return err + } + + } + return state.err +} + +//TODO this is only copied, please fix this +func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + structp := ss.Index(i) + if structPointer_IsNil(structp) { + return // return the size up to this point + } + + // Can the object marshal itself? + if p.isMarshaler { + m := structPointer_Interface(structp, p.stype).(Marshaler) + data, _ := m.Marshal() + n += len(p.tagcode) + n += sizeRawBytes(data) + continue + } + + n0 := size_struct(p.sprop, structp) + n1 := sizeVarint(uint64(n0)) // size of encoded length + n += n0 + n1 + } + return +} + +func (o *Buffer) enc_custom_bytes(p *Properties, base structPointer) error { + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return ErrNil + } + custom := i.(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) 
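+	// The custom type produced its own bytes via Marshal; frame them the
+	// same way as an ordinary length-delimited field.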
+ o.EncodeRawBytes(data) + return nil +} + +func size_custom_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceRef(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_ref_bytes(p *Properties, base structPointer) error { + custom := structPointer_InterfaceAt(base, p.field, p.ctype).(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + if data == nil { + return ErrNil + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_custom_ref_bytes(p *Properties, base structPointer) (n int) { + n += len(p.tagcode) + i := structPointer_InterfaceAt(base, p.field, p.ctype) + if i == nil { + return 0 + } + custom := i.(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + return +} + +func (o *Buffer) enc_custom_slice_bytes(p *Properties, base structPointer) error { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return ErrNil + } + slice := reflect.ValueOf(inter) + l := slice.Len() + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, err := custom.Marshal() + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_custom_slice_bytes(p *Properties, base structPointer) (n int) { + inter := structPointer_InterfaceRef(base, p.field, p.ctype) + if inter == nil { + return 0 + } + slice := reflect.ValueOf(inter) + l := slice.Len() + n += l * len(p.tagcode) + for i := 0; i < l; i++ { + v := slice.Index(i) + custom := v.Interface().(Marshaler) + data, _ := custom.Marshal() + n += sizeRawBytes(data) + } + return +} diff --git a/vendor/github.com/gogo/protobuf/proto/equal.go b/vendor/github.com/gogo/protobuf/proto/equal.go new file mode 100644 index 0000000..2ed1cf5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/equal.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+
+package proto
+
+import (
+	"bytes"
+	"log"
+	"reflect"
+	"strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+  - Two messages are equal iff they are the same type,
+    corresponding fields are equal, unknown field sets
+    are equal, and extension sets are equal.
+  - Two set scalar fields are equal iff their values are equal.
+    If the fields are of a floating-point type, remember that
+    NaN != x for all x, including NaN. If the message is defined
+    in a proto3 .proto file, fields are not "set"; specifically,
+    zero length proto3 "bytes" fields are equal (nil == {}).
+  - Two repeated fields are equal iff their lengths are the same,
+    and their corresponding elements are equal. Note a "bytes" field,
+    although represented by []byte, is not a repeated field and the
+    rule for the scalar fields described above applies.
+  - Two unset fields are equal.
+  - Two unknown field sets are equal if their current
+    encoded state is equal.
+  - Two extension sets are equal iff they have corresponding
+    elements that are pairwise equal.
+  - Two map fields are equal iff their lengths are the same,
+    and they contain the same set of elements. Zero-length map
+    fields are equal.
+  - Every other combination of things is not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+	if a == nil || b == nil {
+		return a == b
+	}
+	v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+	if v1.Type() != v2.Type() {
+		return false
+	}
+	if v1.Kind() == reflect.Ptr {
+		if v1.IsNil() {
+			return v2.IsNil()
+		}
+		if v2.IsNil() {
+			return false
+		}
+		v1, v2 = v1.Elem(), v2.Elem()
+	}
+	if v1.Kind() != reflect.Struct {
+		return false
+	}
+	return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
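+// equalStruct compares the generated fields one by one, skipping the XXX_
+// bookkeeping fields; extensions and unrecognized bytes are compared
+// explicitly at the end.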
+func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + b1, ok := f1.Interface().(raw) + if ok { + b2 := f2.Interface().(raw) + // RawMessage + if !bytes.Equal(b1.Bytes(), b2.Bytes()) { + return false + } + continue + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + if !bytes.Equal(u1, u2) { + return false + } + + return true +} + +// v1 and v2 are known to have the same type. +// prop may be nil. +func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
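+			// In that case a nil slice and an empty non-nil slice
+			// compare equal.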
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1, m2 := e1.value, e2.value + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + continue + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions.go b/vendor/github.com/gogo/protobuf/proto/extensions.go new file mode 100644 index 0000000..0dfcb53 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions.go @@ -0,0 +1,693 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. +type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +type extensionsBytes interface { + Message + ExtensionRangeArray() []ExtensionRange + GetExtensions() *[]byte +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, bool) { + if ep, ok := p.(extendableProto); ok { + return ep, ok + } + if ep, ok := p.(extendableProtoV1); ok { + return extensionAdapter{ep}, ok + } + return nil, false +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. +// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. 
+ // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +type extensionRange interface { + Message + ExtensionRangeArray() []ExtensionRange +} + +var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem() +var extendableProtoV1Type = reflect.TypeOf((*extendableProtoV1)(nil)).Elem() +var extendableBytesType = reflect.TypeOf((*extensionsBytes)(nil)).Elem() +var extensionRangeType = reflect.TypeOf((*extensionRange)(nil)).Elem() + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. +type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + value interface{} + enc []byte +} + +// SetRawExtension is for testing only. +func SetRawExtension(base Message, id int32, b []byte) { + if ebase, ok := base.(extensionsBytes); ok { + clearExtension(base, id) + ext := ebase.GetExtensions() + *ext = append(*ext, b...) + return + } + epb, ok := extendable(base) + if !ok { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extensionRange, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. 
+ if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String()) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensions(e *XXX_InternalExtensions) error { + m, mu := e.extensionsRead() + if m == nil { + return nil // fast path + } + mu.Lock() + defer mu.Unlock() + return encodeExtensionsMap(m) +} + +// encode encodes any unmarshaled (unencoded) extensions in e. +func encodeExtensionsMap(m map[int32]Extension) error { + for k, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + m[k] = e + } + return nil +} + +func extensionsSize(e *XXX_InternalExtensions) (n int) { + m, mu := e.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + defer mu.Unlock() + return extensionsMapSize(m) +} + +func extensionsMapSize(m map[int32]Extension) (n int) { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + n += props.size(props, toStructPointer(x)) + } + return +} + +// HasExtension returns whether the given extension is present in pb. 
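+//
+// A minimal caller-side sketch, assuming hypothetical generated identifiers
+// pb.MyMessage and pb.E_Ext (illustrative only):
+//
+//	msg := &pb.MyMessage{}
+//	if proto.HasExtension(msg, pb.E_Ext) {
+//		// the extension field is present in msg
+//	}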
+func HasExtension(pb Message, extension *ExtensionDesc) bool { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + buf := *ext + o := 0 + for o < len(buf) { + tag, n := DecodeVarint(buf[o:]) + fieldNum := int32(tag >> 3) + if int32(fieldNum) == extension.Field { + return true + } + wireType := int(tag & 0x7) + o += n + l, err := size(buf[o:], wireType) + if err != nil { + return false + } + o += l + } + return false + } + // TODO: Check types, field numbers, etc.? + epb, ok := extendable(pb) + if !ok { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok = extmap[extension.Field] + mu.Unlock() + return ok +} + +func deleteExtension(pb extensionsBytes, theFieldNum int32, offset int) int { + ext := pb.GetExtensions() + for offset < len(*ext) { + tag, n1 := DecodeVarint((*ext)[offset:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + n2, err := size((*ext)[offset+n1:], wireType) + if err != nil { + panic(err) + } + newOffset := offset + n1 + n2 + if fieldNum == theFieldNum { + *ext = append((*ext)[:offset], (*ext)[newOffset:]...) + return offset + } + offset = newOffset + } + return -1 +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + clearExtension(pb, extension.Field) +} + +func clearExtension(pb Message, fieldNum int32) { + if epb, doki := pb.(extensionsBytes); doki { + offset := 0 + for offset != -1 { + offset = deleteExtension(epb, fieldNum, offset) + } + return + } + epb, ok := extendable(pb) + if !ok { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, fieldNum) +} + +// GetExtension parses and returns the given extension of pb. +// If the extension is not present and has no default value it returns ErrMissingExtension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + o := 0 + for o < len(*ext) { + tag, n := DecodeVarint((*ext)[o:]) + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size((*ext)[o+n:], wireType) + if err != nil { + return nil, err + } + if int32(fieldNum) == extension.Field { + v, err := decodeExtension((*ext)[o:o+n+l], extension) + if err != nil { + return nil, err + } + return v, nil + } + o += n + l + } + return defaultExtensionValue(extension) + } + epb, ok := extendable(pb) + if !ok { + return nil, errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return e.value, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. 
+ e.value = v + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return e.value, nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. +func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + o := NewBuffer(b) + + t := reflect.TypeOf(extension.ExtensionType) + + props := extensionProperties(extension) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate a "field" to store the pointer/slice itself; the + // pointer/slice will be stored here. We pass + // the address of this field to props.dec. + // This passes a zero field and a *t and lets props.dec + // interpret it as a *struct{ x t }. + value := reflect.New(t).Elem() + + for { + // Discard wire type and field number varint. It isn't needed. + if _, err := o.DecodeVarint(); err != nil { + return nil, err + } + + if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil { + return nil, err + } + + if o.index >= len(o.buf) { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(pb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. 
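+//
+// A minimal sketch, where msg is any extendable message (illustrative only):
+//
+//	descs, err := proto.ExtensionDescs(msg)
+//	if err == nil {
+//		for _, d := range descs {
+//			fmt.Println(d.Field) // the field number is always populated
+//		}
+//	}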
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, ok := extendable(pb) + if !ok { + return nil, fmt.Errorf("proto: %T is not an extendable proto.Message", pb) + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + if epb, doki := pb.(extensionsBytes); doki { + ClearExtension(pb, extension) + ext := epb.GetExtensions() + et := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + p := NewBuffer(nil) + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + *ext = append(*ext, p.buf...) + return nil + } + epb, ok := extendable(pb) + if !ok { + return errors.New("proto: not an extendable proto") + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return errors.New("proto: bad extension value type") + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: value} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + if epb, doki := pb.(extensionsBytes); doki { + ext := epb.GetExtensions() + *ext = []byte{} + return + } + epb, ok := extendable(pb) + if !ok { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. +// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. 
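+//
+// A minimal sketch, assuming a hypothetical generated type pb.MyMessage:
+//
+//	for num, desc := range proto.RegisteredExtensions((*pb.MyMessage)(nil)) {
+//		fmt.Println(num, desc.Name)
+//	}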
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} diff --git a/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go new file mode 100644 index 0000000..ea6478f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/extensions_gogo.go @@ -0,0 +1,294 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "bytes" + "errors" + "fmt" + "reflect" + "sort" + "strings" + "sync" +) + +func GetBoolExtension(pb Message, extension *ExtensionDesc, ifnotset bool) bool { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*bool) == nil { + return ifnotset + } + return *(value.(*bool)) +} + +func (this *Extension) Equal(that *Extension) bool { + return bytes.Equal(this.enc, that.enc) +} + +func (this *Extension) Compare(that *Extension) int { + return bytes.Compare(this.enc, that.enc) +} + +func SizeOfInternalExtension(m extendableProto) (n int) { + return SizeOfExtensionMap(m.extensionsWrite()) +} + +func SizeOfExtensionMap(m map[int32]Extension) (n int) { + return extensionsMapSize(m) +} + +type sortableMapElem struct { + field int32 + ext Extension +} + +func newSortableExtensionsFromMap(m map[int32]Extension) sortableExtensions { + s := make(sortableExtensions, 0, len(m)) + for k, v := range m { + s = append(s, &sortableMapElem{field: k, ext: v}) + } + return s +} + +type sortableExtensions []*sortableMapElem + +func (this sortableExtensions) Len() int { return len(this) } + +func (this sortableExtensions) Swap(i, j int) { this[i], this[j] = this[j], this[i] } + +func (this sortableExtensions) Less(i, j int) bool { return this[i].field < this[j].field } + +func (this sortableExtensions) String() string { + sort.Sort(this) + ss := make([]string, len(this)) + for i := range this { + ss[i] = fmt.Sprintf("%d: %v", this[i].field, this[i].ext) + } + return "map[" + strings.Join(ss, ",") + "]" +} + +func StringFromInternalExtension(m extendableProto) string { + return StringFromExtensionsMap(m.extensionsWrite()) +} + +func StringFromExtensionsMap(m map[int32]Extension) string { + return newSortableExtensionsFromMap(m).String() +} + +func StringFromExtensionsBytes(ext []byte) string { + m, err := BytesToExtensionsMap(ext) + if err != nil { + panic(err) + } + return StringFromExtensionsMap(m) +} + +func EncodeInternalExtension(m extendableProto, data []byte) (n int, err error) { + return EncodeExtensionMap(m.extensionsWrite(), data) +} + +func EncodeExtensionMap(m map[int32]Extension, data []byte) (n int, err error) { + if err := encodeExtensionsMap(m); err != nil { + return 0, err + } + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + for _, k := range keys { + n += copy(data[n:], m[int32(k)].enc) + } + return n, nil +} + +func GetRawExtension(m map[int32]Extension, id int32) ([]byte, error) { + if m[id].value == nil || m[id].desc == nil { + return m[id].enc, nil + } + if err := encodeExtensionsMap(m); err != nil { + return nil, err + } + return m[id].enc, nil +} + +func size(buf []byte, wire int) (int, error) { + switch wire { + case WireVarint: + _, n := DecodeVarint(buf) + return n, nil + case WireFixed64: + return 8, nil + case WireBytes: + v, n := DecodeVarint(buf) + return int(v) + n, nil + case WireFixed32: + return 4, nil + case WireStartGroup: + offset := 0 + for { + u, n := DecodeVarint(buf[offset:]) + fwire := int(u & 0x7) + offset += n + if fwire == WireEndGroup { + return offset, nil + } + s, err := size(buf[offset:], wire) + if err != nil { + return 0, err + } + offset += s + } + } + return 0, fmt.Errorf("proto: can't get size for unknown wire type %d", wire) +} + +func BytesToExtensionsMap(buf []byte) (map[int32]Extension, error) { + m := make(map[int32]Extension) + 
i := 0 + for i < len(buf) { + tag, n := DecodeVarint(buf[i:]) + if n <= 0 { + return nil, fmt.Errorf("unable to decode varint") + } + fieldNum := int32(tag >> 3) + wireType := int(tag & 0x7) + l, err := size(buf[i+n:], wireType) + if err != nil { + return nil, err + } + end := i + int(l) + n + m[int32(fieldNum)] = Extension{enc: buf[i:end]} + i = end + } + return m, nil +} + +func NewExtension(e []byte) Extension { + ee := Extension{enc: make([]byte, len(e))} + copy(ee.enc, e) + return ee +} + +func AppendExtension(e Message, tag int32, buf []byte) { + if ee, eok := e.(extensionsBytes); eok { + ext := ee.GetExtensions() + *ext = append(*ext, buf...) + return + } + if ee, eok := e.(extendableProto); eok { + m := ee.extensionsWrite() + ext := m[int32(tag)] // may be missing + ext.enc = append(ext.enc, buf...) + m[int32(tag)] = ext + } +} + +func encodeExtension(e *Extension) error { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + return nil + } + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + et := reflect.TypeOf(e.desc.ExtensionType) + props := extensionProperties(e.desc) + + p := NewBuffer(nil) + // If e.value has type T, the encoder expects a *struct{ X T }. + // Pass a *T with a zero field and hope it all works out. + x := reflect.New(et) + x.Elem().Set(reflect.ValueOf(e.value)) + if err := props.enc(p, props, toStructPointer(x)); err != nil { + return err + } + e.enc = p.buf + return nil +} + +func (this Extension) GoString() string { + if this.enc == nil { + if err := encodeExtension(&this); err != nil { + panic(err) + } + } + return fmt.Sprintf("proto.NewExtension(%#v)", this.enc) +} + +func SetUnsafeExtension(pb Message, fieldNum int32, value interface{}) error { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return errors.New("proto: bad extension number; not in declared ranges") + } + return SetExtension(pb, desc, value) +} + +func GetUnsafeExtension(pb Message, fieldNum int32) (interface{}, error) { + typ := reflect.TypeOf(pb).Elem() + ext, ok := extensionMaps[typ] + if !ok { + return nil, fmt.Errorf("proto: bad extended type; %s is not extendable", typ.String()) + } + desc, ok := ext[fieldNum] + if !ok { + return nil, fmt.Errorf("unregistered field number %d", fieldNum) + } + return GetExtension(pb, desc) +} + +func NewUnsafeXXX_InternalExtensions(m map[int32]Extension) XXX_InternalExtensions { + x := &XXX_InternalExtensions{ + p: new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }), + } + x.p.extensionMap = m + return *x +} + +func GetUnsafeExtensionsMap(extendable Message) map[int32]Extension { + pb := extendable.(extendableProto) + return pb.extensionsWrite() +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go new file mode 100644 index 0000000..c98d73d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -0,0 +1,897 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. + Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. 
HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. +Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/gogo/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m 
*Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/gogo/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. + if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// Stats records allocation details about the protocol buffer encoders +// and decoders. Useful for tuning the library itself. +type Stats struct { + Emalloc uint64 // mallocs in encode + Dmalloc uint64 // mallocs in decode + Encode uint64 // number of encodes + Decode uint64 // number of decodes + Chit uint64 // number of cache hits + Cmiss uint64 // number of cache misses + Size uint64 // number of sizes +} + +// Set to true to enable stats collection. +const collectStats = false + +var stats Stats + +// GetStats returns a copy of the global Stats structure. +func GetStats() Stats { return stats } + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + // pools of basic types to amortize allocation. + bools []bool + uint32s []uint32 + uint64s []uint64 + + // extra pools, only used with pointer_reflect.go + int32s []int32 + int64s []int64 + float32s []float32 + float64s []float64 +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. 
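+//
+// Bool and the sibling helpers below exist because proto2 optional scalars
+// are generated as pointer fields. A minimal sketch, reusing the pb.Test
+// example from the package documentation:
+//
+//	t := &pb.Test{
+//		Label: proto.String("hello"),
+//		Type:  proto.Int32(17),
+//	}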
+func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. +func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. + var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. 
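+//
+// A minimal sketch (data holds wire-format bytes, e.g. from Marshal):
+//
+//	data, _ := proto.Marshal(msg)
+//	new(proto.Buffer).DebugPrint("after marshal", data)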
+func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + sindex := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) + depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = sindex +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
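+//
+// For intuition, derived from the cases below: given ft = *int32 and
+// prop.Default = "77", fieldDefault returns &scalarField{kind: reflect.Int32,
+// value: int32(77)}; given ft pointing to a struct (a nested message), it
+// returns sf = nil and nestedMessage = true.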
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// Map fields may have key types of non-float scalars, strings and enums. +// The easiest way to sort them in some deterministic order is to use fmt. +// If this turns out to be inefficient we can always consider other options, +// such as doing a Schwartzian transform. + +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{ + vs: vs, + // default Less function: textual comparison + less: func(a, b reflect.Value) bool { + return fmt.Sprint(a.Interface()) < fmt.Sprint(b.Interface()) + }, + } + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps; + // numeric keys are sorted numerically. 
+ if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. +func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +// ProtoPackageIsVersion2 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion2 = true + +// ProtoPackageIsVersion1 is referenced from generated protocol buffer files +// to assert that that code is compatible with this version of the proto package. +const GoGoProtoPackageIsVersion1 = true diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go new file mode 100644 index 0000000..4b4f7c9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -0,0 +1,42 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+
+package proto
+
+import (
+	"encoding/json"
+	"strconv"
+)
+
+// MarshalJSONEnum marshals an enum value as a JSON string, using the name
+// registered for it in m and falling back to its decimal representation.
+func MarshalJSONEnum(m map[int32]string, value int32) ([]byte, error) {
+	s, ok := m[value]
+	if !ok {
+		s = strconv.Itoa(int(value))
+	}
+	return json.Marshal(s)
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/message_set.go b/vendor/github.com/gogo/protobuf/proto/message_set.go
new file mode 100644
index 0000000..fd982de
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/message_set.go
@@ -0,0 +1,311 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+	"bytes"
+	"encoding/json"
+	"errors"
+	"fmt"
+	"reflect"
+	"sort"
+)
+
+// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+var errNoMessageTypeID = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and messageSet)
+// model what the protocol compiler produces for the following protocol message:
+//	message MessageSet {
+//		repeated group Item = 1 {
+//			required int32 type_id = 2;
+//			required string message = 3;
+//		};
+//	}
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+type messageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure messageSet is a Message.
+var _ Message = (*messageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
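+// A generated type would satisfy it with a method along these lines
+// (hypothetical sketch, not part of this file):
+//
+//	func (*MyExtension) MessageTypeId() int32 { return 12345 }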
+type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + if ms.find(pb) != nil { + return true + } + return false +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? +} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// MarshalMessageSet encodes the extension map represented by m in the message set wire format. +// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSet(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + if err := encodeExtensions(exts); err != nil { + return nil, err + } + m, _ = exts.extensionsRead() + case map[int32]Extension: + if err := encodeExtensionsMap(exts); err != nil { + return nil, err + } + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + + // Sort extension IDs to provide a deterministic encoding. + // See also enc_map in encode.go. + ids := make([]int, 0, len(m)) + for id := range m { + ids = append(ids, int(id)) + } + sort.Ints(ids) + + ms := &messageSet{Item: make([]*_MessageSet_Item, 0, len(m))} + for _, id := range ids { + e := m[int32(id)] + // Remove the wire type and field number varint, as well as the length varint. + msg := skipVarint(skipVarint(e.enc)) + + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: Int32(int32(id)), + Message: msg, + }) + } + return Marshal(ms) +} + +// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. 
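+		// Worked example (illustrative): for type_id 1, the key built below is
+		// EncodeVarint(1<<3|WireBytes) = []byte{0x0a}, i.e. field number 1
+		// with wire type 2 (length-delimited), followed by the length varint.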
+ b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) // join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} + +// MarshalMessageSetJSON encodes the extension map represented by m in JSON format. +// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func MarshalMessageSetJSON(exts interface{}) ([]byte, error) { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m, _ = exts.extensionsRead() + case map[int32]Extension: + m = exts + default: + return nil, errors.New("proto: not an extension map") + } + var b bytes.Buffer + b.WriteByte('{') + + // Process the map in key order for deterministic output. + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) // int32Slice defined in text.go + + for i, id := range ids { + ext := m[id] + if i > 0 { + b.WriteByte(',') + } + + msd, ok := messageSetMap[id] + if !ok { + // Unknown type; we can't render it, so skip it. + continue + } + fmt.Fprintf(&b, `"[%s]":`, msd.name) + + x := ext.value + if x == nil { + x = reflect.New(msd.t.Elem()).Interface() + if err := Unmarshal(ext.enc, x.(Message)); err != nil { + return nil, err + } + } + d, err := json.Marshal(x) + if err != nil { + return nil, err + } + b.Write(d) + } + b.WriteByte('}') + return b.Bytes(), nil +} + +// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format. +// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option. +func UnmarshalMessageSetJSON(buf []byte, exts interface{}) error { + // Common-case fast path. + if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) { + return nil + } + + // This is fairly tricky, and it's not clear that it is needed. + return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented") +} + +// A global registry of types that can be used in a MessageSet. + +var messageSetMap = make(map[int32]messageSetDesc) + +type messageSetDesc struct { + t reflect.Type // pointer to struct + name string +} + +// RegisterMessageSetType is called from the generated code. +func RegisterMessageSetType(m Message, fieldNum int32, name string) { + messageSetMap[fieldNum] = messageSetDesc{ + t: reflect.TypeOf(m), + name: name, + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go new file mode 100644 index 0000000..fb512e2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect.go @@ -0,0 +1,484 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "math" + "reflect" +) + +// A structPointer is a pointer to a struct. +type structPointer struct { + v reflect.Value +} + +// toStructPointer returns a structPointer equivalent to the given reflect value. +// The reflect value must itself be a pointer to a struct. +func toStructPointer(v reflect.Value) structPointer { + return structPointer{v} +} + +// IsNil reports whether p is nil. +func structPointer_IsNil(p structPointer) bool { + return p.v.IsNil() +} + +// Interface returns the struct pointer as an interface value. +func structPointer_Interface(p structPointer, _ reflect.Type) interface{} { + return p.v.Interface() +} + +// A field identifies a field in a struct, accessible from a structPointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// field returns the given field in the struct as a reflect value. +func structPointer_field(p structPointer, f field) reflect.Value { + // Special case: an extension map entry with a value of type T + // passes a *T to the struct-handling code with a zero field, + // expecting that it will be treated as equivalent to *struct{ X T }, + // which has the same memory layout. We have to handle that case + // specially, because reflect will panic if we call FieldByIndex on a + // non-struct. + if f == nil { + return p.v.Elem() + } + + return p.v.Elem().FieldByIndex(f) +} + +// ifield returns the given field in the struct as an interface value. +func structPointer_ifield(p structPointer, f field) interface{} { + return structPointer_field(p, f).Addr().Interface() +} + +// Bytes returns the address of a []byte field in the struct. 
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+	return structPointer_ifield(p, f).(*[]byte)
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+	return structPointer_ifield(p, f).(*[][]byte)
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+	return structPointer_ifield(p, f).(**bool)
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+	return structPointer_ifield(p, f).(*bool)
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+	return structPointer_ifield(p, f).(*[]bool)
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+	return structPointer_ifield(p, f).(**string)
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+	return structPointer_ifield(p, f).(*string)
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+	return structPointer_ifield(p, f).(*[]string)
+}
+
+// Extensions returns the address of an extension map field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return structPointer_ifield(p, f).(*XXX_InternalExtensions)
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return structPointer_ifield(p, f).(*map[int32]Extension)
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return structPointer_field(p, f).Addr()
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	structPointer_field(p, f).Set(q.v)
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return structPointer{structPointer_field(p, f)}
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
+	return structPointerSlice{structPointer_field(p, f)}
+}
+
+// A structPointerSlice represents the address of a slice of pointers to structs
+// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
+type structPointerSlice struct {
+	v reflect.Value
+}
+
+func (p structPointerSlice) Len() int                  { return p.v.Len() }
+func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }
+func (p structPointerSlice) Append(q structPointer) {
+	p.v.Set(reflect.Append(p.v, q.v))
+}
+
+var (
+	int32Type   = reflect.TypeOf(int32(0))
+	uint32Type  = reflect.TypeOf(uint32(0))
+	float32Type = reflect.TypeOf(float32(0))
+	int64Type   = reflect.TypeOf(int64(0))
+	uint64Type  = reflect.TypeOf(uint64(0))
+	float64Type = reflect.TypeOf(float64(0))
+)
+
+// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
+// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
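+// For instance (illustrative), storing 1.5 in a *float32 field is
+// word32_Set(p, o, math.Float32bits(1.5)); word32_Get returns the same bits,
+// to be decoded with math.Float32frombits.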
+type word32 struct { + v reflect.Value +} + +// IsNil reports whether p is nil. +func word32_IsNil(p word32) bool { + return p.v.IsNil() +} + +// Set sets p to point at a newly allocated word with bits set to x. +func word32_Set(p word32, o *Buffer, x uint32) { + t := p.v.Type().Elem() + switch t { + case int32Type: + if len(o.int32s) == 0 { + o.int32s = make([]int32, uint32PoolSize) + } + o.int32s[0] = int32(x) + p.v.Set(reflect.ValueOf(&o.int32s[0])) + o.int32s = o.int32s[1:] + return + case uint32Type: + if len(o.uint32s) == 0 { + o.uint32s = make([]uint32, uint32PoolSize) + } + o.uint32s[0] = x + p.v.Set(reflect.ValueOf(&o.uint32s[0])) + o.uint32s = o.uint32s[1:] + return + case float32Type: + if len(o.float32s) == 0 { + o.float32s = make([]float32, uint32PoolSize) + } + o.float32s[0] = math.Float32frombits(x) + p.v.Set(reflect.ValueOf(&o.float32s[0])) + o.float32s = o.float32s[1:] + return + } + + // must be enum + p.v.Set(reflect.New(t)) + p.v.Elem().SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32_Get(p word32) uint32 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct. +func structPointer_Word32(p structPointer, f field) word32 { + return word32{structPointer_field(p, f)} +} + +// A word32Val represents a field of type int32, uint32, float32, or enum. +// That is, v.Type() is int32, uint32, float32, or enum and v is assignable. +type word32Val struct { + v reflect.Value +} + +// Set sets *p to x. +func word32Val_Set(p word32Val, x uint32) { + switch p.v.Type() { + case int32Type: + p.v.SetInt(int64(x)) + return + case uint32Type: + p.v.SetUint(uint64(x)) + return + case float32Type: + p.v.SetFloat(float64(math.Float32frombits(x))) + return + } + + // must be enum + p.v.SetInt(int64(int32(x))) +} + +// Get gets the bits pointed at by p, as a uint32. +func word32Val_Get(p word32Val) uint32 { + elem := p.v + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct. +func structPointer_Word32Val(p structPointer, f field) word32Val { + return word32Val{structPointer_field(p, f)} +} + +// A word32Slice is a slice of 32-bit values. +// That is, v.Type() is []int32, []uint32, []float32, or []enum. 
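+// For example (illustrative), appending 2.5 to a []float32 field is
+// p.Append(math.Float32bits(2.5)); Append converts the raw bits back to the
+// element's kind before storing them.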
+type word32Slice struct { + v reflect.Value +} + +func (p word32Slice) Append(x uint32) { + n, m := p.v.Len(), p.v.Cap() + if n < m { + p.v.SetLen(n + 1) + } else { + t := p.v.Type().Elem() + p.v.Set(reflect.Append(p.v, reflect.Zero(t))) + } + elem := p.v.Index(n) + switch elem.Kind() { + case reflect.Int32: + elem.SetInt(int64(int32(x))) + case reflect.Uint32: + elem.SetUint(uint64(x)) + case reflect.Float32: + elem.SetFloat(float64(math.Float32frombits(x))) + } +} + +func (p word32Slice) Len() int { + return p.v.Len() +} + +func (p word32Slice) Index(i int) uint32 { + elem := p.v.Index(i) + switch elem.Kind() { + case reflect.Int32: + return uint32(elem.Int()) + case reflect.Uint32: + return uint32(elem.Uint()) + case reflect.Float32: + return math.Float32bits(float32(elem.Float())) + } + panic("unreachable") +} + +// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct. +func structPointer_Word32Slice(p structPointer, f field) word32Slice { + return word32Slice{structPointer_field(p, f)} +} + +// word64 is like word32 but for 64-bit values. +type word64 struct { + v reflect.Value +} + +func word64_Set(p word64, o *Buffer, x uint64) { + t := p.v.Type().Elem() + switch t { + case int64Type: + if len(o.int64s) == 0 { + o.int64s = make([]int64, uint64PoolSize) + } + o.int64s[0] = int64(x) + p.v.Set(reflect.ValueOf(&o.int64s[0])) + o.int64s = o.int64s[1:] + return + case uint64Type: + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + p.v.Set(reflect.ValueOf(&o.uint64s[0])) + o.uint64s = o.uint64s[1:] + return + case float64Type: + if len(o.float64s) == 0 { + o.float64s = make([]float64, uint64PoolSize) + } + o.float64s[0] = math.Float64frombits(x) + p.v.Set(reflect.ValueOf(&o.float64s[0])) + o.float64s = o.float64s[1:] + return + } + panic("unreachable") +} + +func word64_IsNil(p word64) bool { + return p.v.IsNil() +} + +func word64_Get(p word64) uint64 { + elem := p.v.Elem() + switch elem.Kind() { + case reflect.Int64: + return uint64(elem.Int()) + case reflect.Uint64: + return elem.Uint() + case reflect.Float64: + return math.Float64bits(elem.Float()) + } + panic("unreachable") +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64{structPointer_field(p, f)} +} + +// word64Val is like word32Val but for 64-bit values. 
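+// As in the 32-bit case (illustrative), a float64 round-trips via
+// word64Val_Set(p, o, math.Float64bits(x)) and
+// math.Float64frombits(word64Val_Get(p)).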
+type word64Val struct {
+	v reflect.Value
+}
+
+func word64Val_Set(p word64Val, o *Buffer, x uint64) {
+	switch p.v.Type() {
+	case int64Type:
+		p.v.SetInt(int64(x))
+		return
+	case uint64Type:
+		p.v.SetUint(x)
+		return
+	case float64Type:
+		p.v.SetFloat(math.Float64frombits(x))
+		return
+	}
+	panic("unreachable")
+}
+
+func word64Val_Get(p word64Val) uint64 {
+	elem := p.v
+	switch elem.Kind() {
+	case reflect.Int64:
+		return uint64(elem.Int())
+	case reflect.Uint64:
+		return elem.Uint()
+	case reflect.Float64:
+		return math.Float64bits(elem.Float())
+	}
+	panic("unreachable")
+}
+
+func structPointer_Word64Val(p structPointer, f field) word64Val {
+	return word64Val{structPointer_field(p, f)}
+}
+
+// word64Slice is like word32Slice but for 64-bit values.
+type word64Slice struct {
+	v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+	n, m := p.v.Len(), p.v.Cap()
+	if n < m {
+		p.v.SetLen(n + 1)
+	} else {
+		t := p.v.Type().Elem()
+		p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+	}
+	elem := p.v.Index(n)
+	switch elem.Kind() {
+	case reflect.Int64:
+		elem.SetInt(int64(x))
+	case reflect.Uint64:
+		elem.SetUint(x)
+	case reflect.Float64:
+		elem.SetFloat(math.Float64frombits(x))
+	}
+}
+
+func (p word64Slice) Len() int {
+	return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+	elem := p.v.Index(i)
+	switch elem.Kind() {
+	case reflect.Int64:
+		return uint64(elem.Int())
+	case reflect.Uint64:
+		return elem.Uint()
+	case reflect.Float64:
+		return math.Float64bits(elem.Float())
+	}
+	panic("unreachable")
+}
+
+func structPointer_Word64Slice(p structPointer, f field) word64Slice {
+	return word64Slice{structPointer_field(p, f)}
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
new file mode 100644
index 0000000..1763a5f
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go
@@ -0,0 +1,85 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +// +build appengine js + +package proto + +import ( + "reflect" +) + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + panic("not implemented") +} + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func structPointer_Add(p structPointer, size field) structPointer { + panic("not implemented") +} + +func structPointer_Len(p structPointer, f field) int { + panic("not implemented") +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + panic("not implemented") +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + panic("not implemented") +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + panic("not implemented") +} + +type structRefSlice struct{} + +func (v *structRefSlice) Len() int { + panic("not implemented") +} + +func (v *structRefSlice) Index(i int) structPointer { + panic("not implemented") +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go new file mode 100644 index 0000000..6b5567d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,270 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
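+//
+// A field here is a plain byte offset. As a hypothetical example, given
+//
+//	type T struct{ A, B *int32 }
+//
+// toField for B yields B's offset within T, and each accessor reaches the
+// field in place by computing unsafe.Pointer(uintptr(p) + uintptr(f)).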
+
+package proto
+
+import (
+	"reflect"
+	"unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+//	type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
+// A structPointer is a pointer to a struct.
+type structPointer unsafe.Pointer
+
+// toStructPointer returns a structPointer equivalent to the given reflect value.
+func toStructPointer(v reflect.Value) structPointer {
+	return structPointer(unsafe.Pointer(v.Pointer()))
+}
+
+// IsNil reports whether p is nil.
+func structPointer_IsNil(p structPointer) bool {
+	return p == nil
+}
+
+// Interface returns the struct pointer, assumed to have element type t,
+// as an interface value.
+func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
+	return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
+}
+
+// A field identifies a field in a struct, accessible from a structPointer.
+// In this implementation, a field is identified by its byte offset from the start of the struct.
+type field uintptr
+
+// toField returns a field equivalent to the given reflect field.
+func toField(f *reflect.StructField) field {
+	return field(f.Offset)
+}
+
+// invalidField is an invalid field identifier.
+const invalidField = ^field(0)
+
+// IsValid reports whether the field identifier is valid.
+func (f field) IsValid() bool {
+	return f != ^field(0)
+}
+
+// Bytes returns the address of a []byte field in the struct.
+func structPointer_Bytes(p structPointer, f field) *[]byte {
+	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BytesSlice returns the address of a [][]byte field in the struct.
+func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
+	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Bool returns the address of a *bool field in the struct.
+func structPointer_Bool(p structPointer, f field) **bool {
+	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolVal returns the address of a bool field in the struct.
+func structPointer_BoolVal(p structPointer, f field) *bool {
+	return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// BoolSlice returns the address of a []bool field in the struct.
+func structPointer_BoolSlice(p structPointer, f field) *[]bool {
+	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// String returns the address of a *string field in the struct.
+func structPointer_String(p structPointer, f field) **string {
+	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringVal returns the address of a string field in the struct.
+func structPointer_StringVal(p structPointer, f field) *string {
+	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StringSlice returns the address of a []string field in the struct.
+func structPointer_StringSlice(p structPointer, f field) *[]string {
+	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// Extensions returns the address of the extensions field in the struct.
+func structPointer_Extensions(p structPointer, f field) *XXX_InternalExtensions {
+	return (*XXX_InternalExtensions)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// ExtMap returns the address of an extension map field in the struct.
+func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
+	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// NewAt returns the reflect.Value for a pointer to a field in the struct.
+func structPointer_NewAt(p structPointer, f field, typ reflect.Type) reflect.Value {
+	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
+}
+
+// SetStructPointer writes a *struct field in the struct.
+func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
+	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
+}
+
+// GetStructPointer reads a *struct field in the struct.
+func structPointer_GetStructPointer(p structPointer, f field) structPointer {
+	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// StructPointerSlice returns the address of a []*struct field in the struct.
+func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
+	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
+type structPointerSlice []structPointer
+
+func (v *structPointerSlice) Len() int                  { return len(*v) }
+func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
+func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }
+
+// A word32 is the address of a "pointer to 32-bit value" field.
+type word32 **uint32
+
+// IsNil reports whether *p is nil.
+func word32_IsNil(p word32) bool {
+	return *p == nil
+}
+
+// Set sets *p to point at a newly allocated word set to x.
+func word32_Set(p word32, o *Buffer, x uint32) {
+	if len(o.uint32s) == 0 {
+		o.uint32s = make([]uint32, uint32PoolSize)
+	}
+	o.uint32s[0] = x
+	*p = &o.uint32s[0]
+	o.uint32s = o.uint32s[1:]
+}
+
+// Get gets the value pointed at by *p.
+func word32_Get(p word32) uint32 {
+	return **p
+}
+
+// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
+func structPointer_Word32(p structPointer, f field) word32 {
+	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Val is the address of a 32-bit value field.
+type word32Val *uint32
+
+// Set sets *p to x.
+func word32Val_Set(p word32Val, x uint32) {
+	*p = x
+}
+
+// Get gets the value pointed at by p.
+func word32Val_Get(p word32Val) uint32 {
+	return *p
+}
+
+// Word32Val returns the address of a int32, uint32, float32, or enum field in the struct.
+func structPointer_Word32Val(p structPointer, f field) word32Val {
+	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
+}
+
+// A word32Slice is a slice of 32-bit values.
+type word32Slice []uint32
+
+func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
+func (v *word32Slice) Len() int           { return len(*v) }
+func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }
+
+// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
+func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
+	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
+}
+
+// word64 is like word32 but for 64-bit values.
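+// Like word32_Set, word64_Set hands out pointers from the Buffer's uint64s
+// pool, so decoding many optional fields costs one allocation per
+// uint64PoolSize values instead of one per field.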
+type word64 **uint64 + +func word64_Set(p word64, o *Buffer, x uint64) { + if len(o.uint64s) == 0 { + o.uint64s = make([]uint64, uint64PoolSize) + } + o.uint64s[0] = x + *p = &o.uint64s[0] + o.uint64s = o.uint64s[1:] +} + +func word64_IsNil(p word64) bool { + return *p == nil +} + +func word64_Get(p word64) uint64 { + return **p +} + +func structPointer_Word64(p structPointer, f field) word64 { + return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Val is like word32Val but for 64-bit values. +type word64Val *uint64 + +func word64Val_Set(p word64Val, o *Buffer, x uint64) { + *p = x +} + +func word64Val_Get(p word64Val) uint64 { + return *p +} + +func structPointer_Word64Val(p structPointer, f field) word64Val { + return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +// word64Slice is like word32Slice but for 64-bit values. +type word64Slice []uint64 + +func (v *word64Slice) Append(x uint64) { *v = append(*v, x) } +func (v *word64Slice) Len() int { return len(*v) } +func (v *word64Slice) Index(i int) uint64 { return (*v)[i] } + +func structPointer_Word64Slice(p structPointer, f field) *word64Slice { + return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go new file mode 100644 index 0000000..f156a29 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -0,0 +1,128 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. 
+ +package proto + +import ( + "reflect" + "unsafe" +) + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + return r.Interface() +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + point := unsafe.Pointer(uintptr(p) + uintptr(f)) + r := reflect.NewAt(t, point) + if r.Elem().IsNil() { + return nil + } + return r.Elem().Interface() +} + +func copyUintPtr(oldptr, newptr uintptr, size int) { + oldbytes := make([]byte, 0) + oldslice := (*reflect.SliceHeader)(unsafe.Pointer(&oldbytes)) + oldslice.Data = oldptr + oldslice.Len = size + oldslice.Cap = size + newbytes := make([]byte, 0) + newslice := (*reflect.SliceHeader)(unsafe.Pointer(&newbytes)) + newslice.Data = newptr + newslice.Len = size + newslice.Cap = size + copy(newbytes, oldbytes) +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + copyUintPtr(uintptr(oldptr), uintptr(newptr), size) +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + size := typ.Elem().Size() + + oldHeader := structPointer_GetSliceHeader(base, f) + oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() + newLen := oldHeader.Len + 1 + newSlice := reflect.MakeSlice(typ, newLen, newLen) + reflect.Copy(newSlice, oldSlice) + bas := toStructPointer(newSlice) + oldHeader.Data = uintptr(bas) + oldHeader.Len = newLen + oldHeader.Cap = newLen + + return structPointer(unsafe.Pointer(uintptr(unsafe.Pointer(bas)) + uintptr(uintptr(newLen-1)*size))) +} + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + return structPointer((*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))) +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + return (*reflect.SliceHeader)(unsafe.Pointer(uintptr(p) + uintptr(f))) +} + +func structPointer_Add(p structPointer, size field) structPointer { + return structPointer(unsafe.Pointer(uintptr(p) + uintptr(size))) +} + +func structPointer_Len(p structPointer, f field) int { + return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + return &structRefSlice{p: p, f: f, size: size} +} + +// A structRefSlice represents a slice of structs (themselves submessages or groups). +type structRefSlice struct { + p structPointer + f field + size uintptr +} + +func (v *structRefSlice) Len() int { + return structPointer_Len(v.p, v.f) +} + +func (v *structRefSlice) Index(i int) structPointer { + ss := structPointer_GetStructPointer(v.p, v.f) + ss1 := structPointer_GetRefStructPointer(ss, 0) + return structPointer_Add(ss1, field(uintptr(i)*v.size)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go new file mode 100644 index 0000000..44b3320 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -0,0 +1,968 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "os" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. +const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +const startSize = 10 // initial slice/string sizes + +// Encoders are defined in encode.go +// An encoder outputs the full representation of a field, including its +// tag and encoder type. +type encoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueEncoder encodes a single integer in a particular encoding. +type valueEncoder func(o *Buffer, x uint64) error + +// Sizers are defined in encode.go +// A sizer returns the encoded size of a field, including its tag and encoder +// type. +type sizer func(prop *Properties, base structPointer) int + +// A valueSizer returns the encoded size of a single integer in a particular +// encoding. +type valueSizer func(x uint64) int + +// Decoders are defined in decode.go +// A decoder creates a value from its wire representation. +// Unrecognized subelements are saved in unrec. +type decoder func(p *Buffer, prop *Properties, base structPointer) error + +// A valueDecoder decodes a single integer in a particular encoding. +type valueDecoder func(o *Buffer) (x uint64, err error) + +// A oneofMarshaler does the marshaling for all oneof fields in a message. +type oneofMarshaler func(Message, *Buffer) error + +// A oneofUnmarshaler does the unmarshaling for a oneof field in a message. +type oneofUnmarshaler func(Message, int, int, *Buffer) (bool, error) + +// A oneofSizer does the sizing for all oneof fields in a message. +type oneofSizer func(Message) int + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. 
Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + unrecField field // field id of the XXX_unrecognized []byte field + extendable bool // is this an extendable proto + + oneofMarshaler oneofMarshaler + oneofUnmarshaler oneofUnmarshaler + oneofSizer oneofSizer + stype reflect.Type + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. +type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. 
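+//
+// For example (illustrative), a generated field declared as
+//
+//	Name *string `protobuf:"bytes,1,opt,name=name"`
+//
+// parses to Wire="bytes", Tag=1, Optional=true and OrigName="name".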
+type Properties struct {
+	Name     string // name of the field, for error messages
+	OrigName string // original name before protocol compiler (always set)
+	JSONName string // name to use for JSON; determined by protoc
+	Wire     string
+	WireType int
+	Tag      int
+	Required bool
+	Optional bool
+	Repeated bool
+	Packed   bool   // relevant for repeated primitives only
+	Enum     string // set for enum types only
+	proto3   bool   // whether this is known to be a proto3 field; set for []byte only
+	oneof    bool   // whether this is a oneof field
+
+	Default     string // default value
+	HasDefault  bool   // whether an explicit default was provided
+	CustomType  string
+	StdTime     bool
+	StdDuration bool
+
+	enc           encoder
+	valEnc        valueEncoder // set for bool and numeric types only
+	field         field
+	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
+	tagbuf        [8]byte
+	stype         reflect.Type      // set for struct types only
+	sstype        reflect.Type      // set for slices of structs types only
+	ctype         reflect.Type      // set for custom types only
+	sprop         *StructProperties // set for struct types only
+	isMarshaler   bool
+	isUnmarshaler bool
+
+	mtype    reflect.Type // set for map types only
+	mkeyprop *Properties  // set for map types only
+	mvalprop *Properties  // set for map types only
+
+	size    sizer
+	valSize valueSizer // set for bool and numeric types only
+
+	dec    decoder
+	valDec valueDecoder // set for bool and numeric types only
+
+	// If this is a packable field, this will be the decoder for the packed version of the field.
+	packedDec decoder
+}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+	s := p.Wire
+	s += ","
+	s += strconv.Itoa(p.Tag)
+	if p.Required {
+		s += ",req"
+	}
+	if p.Optional {
+		s += ",opt"
+	}
+	if p.Repeated {
+		s += ",rep"
+	}
+	if p.Packed {
+		s += ",packed"
+	}
+	s += ",name=" + p.OrigName
+	if p.JSONName != p.OrigName {
+		s += ",json=" + p.JSONName
+	}
+	if p.proto3 {
+		s += ",proto3"
+	}
+	if p.oneof {
+		s += ",oneof"
+	}
+	if len(p.Enum) > 0 {
+		s += ",enum=" + p.Enum
+	}
+	if p.HasDefault {
+		s += ",def=" + p.Default
+	}
+	return s
+}
+
+// Parse populates p by parsing a string in the protobuf struct field tag style.
+func (p *Properties) Parse(s string) {
+	// "bytes,49,opt,name=foo,def=hello!"
+	fields := strings.Split(s, ",") // breaks def=, but handled below.
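+	// e.g. (illustrative) "bytes,1,opt,name=f,def=a,b" splits the default into
+	// "def=a" and "b"; the def= case below rejoins the tail, since commas in
+	// defaults are not escaped and def is always last.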
+ if len(fields) < 2 { + fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeVarint + p.valDec = (*Buffer).DecodeVarint + p.valSize = sizeVarint + case "fixed32": + p.WireType = WireFixed32 + p.valEnc = (*Buffer).EncodeFixed32 + p.valDec = (*Buffer).DecodeFixed32 + p.valSize = sizeFixed32 + case "fixed64": + p.WireType = WireFixed64 + p.valEnc = (*Buffer).EncodeFixed64 + p.valDec = (*Buffer).DecodeFixed64 + p.valSize = sizeFixed64 + case "zigzag32": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag32 + p.valDec = (*Buffer).DecodeZigzag32 + p.valSize = sizeZigzag32 + case "zigzag64": + p.WireType = WireVarint + p.valEnc = (*Buffer).EncodeZigzag64 + p.valDec = (*Buffer).DecodeZigzag64 + p.valSize = sizeZigzag64 + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break + } + case strings.HasPrefix(f, "embedded="): + p.OrigName = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "customtype="): + p.CustomType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true + } + } +} + +func logNoSliceEnc(t1, t2 reflect.Type) { + fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2) +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// Initialize the fields for encoding and decoding. 
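+// For instance (illustrative), a proto2 *int32 field is wired to
+// (*Buffer).enc_int32 and (*Buffer).dec_int32, while a proto3 int32 value
+// field gets enc_proto3_int32 and dec_proto3_int32.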
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + p.enc = nil + p.dec = nil + p.size = nil + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { + p.setCustomEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdTime && !isMap { + p.setTimeEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } + switch t1 := typ; t1.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) + + // proto3 scalar types + + case reflect.Bool: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_proto3_bool + } else { + p.enc = (*Buffer).enc_ref_bool + p.dec = (*Buffer).dec_proto3_bool + p.size = size_ref_bool + } + case reflect.Int32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_int32 + } else { + p.enc = (*Buffer).enc_ref_int32 + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_int32 + } + case reflect.Uint32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 + p.dec = (*Buffer).dec_proto3_int32 // can reuse + p.size = size_ref_uint32 + } + case reflect.Int64, reflect.Uint64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.Float32: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_proto3_uint32 + } else { + p.enc = (*Buffer).enc_ref_uint32 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int32 + p.size = size_ref_uint32 + } + case reflect.Float64: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_proto3_int64 + } else { + p.enc = (*Buffer).enc_ref_int64 // can just treat them as bits + p.dec = (*Buffer).dec_proto3_int64 + p.size = size_ref_int64 + } + case reflect.String: + if p.proto3 { + p.enc = (*Buffer).enc_proto3_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_proto3_string + } else { + p.enc = (*Buffer).enc_ref_string + p.dec = (*Buffer).dec_proto3_string + p.size = size_ref_string + } + case reflect.Struct: + p.stype = typ + p.isMarshaler = isMarshaler(typ) + p.isUnmarshaler = isUnmarshaler(typ) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_ref_struct_message + p.dec = (*Buffer).dec_ref_struct_message + p.size = size_ref_struct_message + } else { + fmt.Fprintf(os.Stderr, "proto: no coders for struct %T\n", typ) + } + + case reflect.Ptr: + switch t2 := t1.Elem(); t2.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2) + break + case reflect.Bool: + p.enc = (*Buffer).enc_bool + p.dec = (*Buffer).dec_bool + p.size = size_bool + case reflect.Int32: + p.enc = (*Buffer).enc_int32 + p.dec = (*Buffer).dec_int32 + p.size = size_int32 + case reflect.Uint32: + p.enc = (*Buffer).enc_uint32 + p.dec = (*Buffer).dec_int32 // can reuse + p.size = size_uint32 + case reflect.Int64, reflect.Uint64: + p.enc = (*Buffer).enc_int64 + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.Float32: + p.enc = (*Buffer).enc_uint32 // can 
just treat them as bits + p.dec = (*Buffer).dec_int32 + p.size = size_uint32 + case reflect.Float64: + p.enc = (*Buffer).enc_int64 // can just treat them as bits + p.dec = (*Buffer).dec_int64 + p.size = size_int64 + case reflect.String: + p.enc = (*Buffer).enc_string + p.dec = (*Buffer).dec_string + p.size = size_string + case reflect.Struct: + p.stype = t1.Elem() + p.isMarshaler = isMarshaler(t1) + p.isUnmarshaler = isUnmarshaler(t1) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_struct_message + p.dec = (*Buffer).dec_struct_message + p.size = size_struct_message + } else { + p.enc = (*Buffer).enc_struct_group + p.dec = (*Buffer).dec_struct_group + p.size = size_struct_group + } + } + + case reflect.Slice: + switch t2 := t1.Elem(); t2.Kind() { + default: + logNoSliceEnc(t1, t2) + break + case reflect.Bool: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_bool + p.size = size_slice_packed_bool + } else { + p.enc = (*Buffer).enc_slice_bool + p.size = size_slice_bool + } + p.dec = (*Buffer).dec_slice_bool + p.packedDec = (*Buffer).dec_slice_packed_bool + case reflect.Int32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int32 + p.size = size_slice_packed_int32 + } else { + p.enc = (*Buffer).enc_slice_int32 + p.size = size_slice_int32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Uint32: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case reflect.Int64, reflect.Uint64: + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + case reflect.Uint8: + p.dec = (*Buffer).dec_slice_byte + if p.proto3 { + p.enc = (*Buffer).enc_proto3_slice_byte + p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte + } + case reflect.Float32, reflect.Float64: + switch t2.Bits() { + case 32: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_uint32 + p.size = size_slice_packed_uint32 + } else { + p.enc = (*Buffer).enc_slice_uint32 + p.size = size_slice_uint32 + } + p.dec = (*Buffer).dec_slice_int32 + p.packedDec = (*Buffer).dec_slice_packed_int32 + case 64: + // can just treat them as bits + if p.Packed { + p.enc = (*Buffer).enc_slice_packed_int64 + p.size = size_slice_packed_int64 + } else { + p.enc = (*Buffer).enc_slice_int64 + p.size = size_slice_int64 + } + p.dec = (*Buffer).dec_slice_int64 + p.packedDec = (*Buffer).dec_slice_packed_int64 + default: + logNoSliceEnc(t1, t2) + break + } + case reflect.String: + p.enc = (*Buffer).enc_slice_string + p.dec = (*Buffer).dec_slice_string + p.size = size_slice_string + case reflect.Ptr: + switch t3 := t2.Elem(); t3.Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3) + break + case reflect.Struct: + p.stype = t2.Elem() + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + if p.Wire == "bytes" { + p.enc = (*Buffer).enc_slice_struct_message + p.dec = (*Buffer).dec_slice_struct_message + p.size = size_slice_struct_message + } else { + p.enc = (*Buffer).enc_slice_struct_group + p.dec = (*Buffer).dec_slice_struct_group + p.size = 
size_slice_struct_group + } + } + case reflect.Slice: + switch t2.Elem().Kind() { + default: + fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem()) + break + case reflect.Uint8: + p.enc = (*Buffer).enc_slice_slice_byte + p.dec = (*Buffer).dec_slice_slice_byte + p.size = size_slice_slice_byte + } + case reflect.Struct: + p.setSliceOfNonPointerStructs(t1) + } + + case reflect.Map: + p.enc = (*Buffer).enc_new_map + p.dec = (*Buffer).dec_new_map + p.size = size_new_map + + p.mtype = t1 + p.mkeyprop = &Properties{} + p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.mvalprop = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime + p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + p.setTag(lockGetProp) +} + +func (p *Properties) setTag(lockGetProp bool) { + // precalculate tag code + wire := p.WireType + if p.Packed { + wire = WireBytes + } + x := uint32(p.Tag)<<3 | uint32(wire) + i := 0 + for i = 0; x > 127; i++ { + p.tagbuf[i] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + p.tagbuf[i] = uint8(x) + p.tagcode = p.tagbuf[0 : i+1] + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() +) + +// isMarshaler reports whether type t implements Marshaler. +func isMarshaler(t reflect.Type) bool { + return t.Implements(marshalerType) +} + +// isUnmarshaler reports whether type t implements Unmarshaler. +func isUnmarshaler(t reflect.Type) bool { + return t.Implements(unmarshalerType) +} + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if f != nil { + p.field = toField(f) + } + if tag == "" { + return + } + p.Parse(tag) + p.setEncAndDec(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. +// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + if collectStats { + stats.Chit++ + } + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +// getPropertiesLocked requires that propertiesMu is held. 
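The toy below isolates the locking idiom that GetProperties and getPropertiesLocked implement together: a shared read lock on the fast path, with a re-check under the exclusive lock so two goroutines racing on a miss do not both rebuild the entry. It is a standalone sketch, not part of the package.

// Standalone sketch of the read-mostly memoization idiom used above.
package main

import (
	"fmt"
	"reflect"
	"sync"
)

var (
	mu    sync.RWMutex
	cache = map[reflect.Type]string{}
)

func nameOf(t reflect.Type) string {
	mu.RLock()
	n, ok := cache[t]
	mu.RUnlock()
	if ok {
		return n // fast path: most lookups repeat earlier types
	}
	mu.Lock()
	defer mu.Unlock()
	if n, ok := cache[t]; ok {
		return n // filled in by another goroutine between the two locks
	}
	n = t.String()
	cache[t] = n
	return n
}

func main() {
	fmt.Println(nameOf(reflect.TypeOf(0))) // int
}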
+func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + if collectStats { + stats.Chit++ + } + return prop + } + if collectStats { + stats.Cmiss++ + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType) || + reflect.PtrTo(t).Implements(extendableProtoV1Type) || + reflect.PtrTo(t).Implements(extendableBytesType) + prop.unrecField = invalidField + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + isOneofMessage := false + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + if f.Name == "XXX_InternalExtensions" { // special case + p.enc = (*Buffer).enc_exts + p.dec = nil // not needed + p.size = size_exts + } else if f.Name == "XXX_extensions" { // special case + if len(f.Tag.Get("protobuf")) > 0 { + p.enc = (*Buffer).enc_ext_slice_byte + p.dec = nil // not needed + p.size = size_ext_slice_byte + } else { + p.enc = (*Buffer).enc_map + p.dec = nil // not needed + p.size = size_map + } + } else if f.Name == "XXX_unrecognized" { // special case + prop.unrecField = toField(&f) + } + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + isOneofMessage = true + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") && oneof == "" { + fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + type oneofMessage interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + if om, ok := reflect.Zero(reflect.PtrTo(t)).Interface().(oneofMessage); isOneofMessage && ok { + var oots []interface{} + prop.oneofMarshaler, prop.oneofUnmarshaler, prop.oneofSizer, oots = om.XXX_OneofFuncs() + prop.stype = t + + // Interpret oneof metadata. + prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// Return the Properties object for the x[0]'th field of the structure. 
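For context on the oneof branches above, this is roughly the shape of the generated code they expect; every name here is invented. The message holds a single interface-typed field tagged protobuf_oneof, and each case is a one-field wrapper struct whose protobuf tag is what Properties.Parse reads.

// Hypothetical generated oneof wrappers (names invented).
package example

type Msg struct {
	// Types that are assignable to Body:
	//	*Msg_Text
	//	*Msg_Data
	Body isMsg_Body `protobuf_oneof:"body"`
}

type isMsg_Body interface{ isMsg_Body() }

type Msg_Text struct {
	Text string `protobuf:"bytes,1,opt,name=text,oneof"`
}

type Msg_Data struct {
	Data []byte `protobuf:"bytes,2,opt,name=data,oneof"`
}

func (*Msg_Text) isMsg_Body() {}
func (*Msg_Data) isMsg_Body() {}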
+func propByIndex(t reflect.Type, x []int) *Properties { + if len(x) != 1 { + fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t) + return nil + } + prop := GetProperties(t) + return prop.Prop[x[0]] +} + +// Get the address and type of a pointer to a struct from an interface. +func getbase(pb Message) (t reflect.Type, b structPointer, err error) { + if pb == nil { + err = ErrNil + return + } + // get the reflect type of the pointer to the struct. + t = reflect.TypeOf(pb) + // get the address of the struct. + value := reflect.ValueOf(pb) + b = toStructPointer(value) + return +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) +var enumStringMaps = make(map[string]map[int32]string) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap + if _, ok := enumStringMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumStringMaps[typeName] = unusedNameMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). +var ( + protoTypes = make(map[string]reflect.Type) + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypes[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +func MessageType(name string) reflect.Type { return protoTypes[name] } + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. 
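A hypothetical use of the registries above; RegisterType normally runs from a generated init function, and Foo / "example.Foo" are stand-ins. The one-line FileDescriptor accessor follows below.

// Hypothetical round-trip through the type registry.
package main

import (
	"fmt"
	"reflect"

	"github.com/gogo/protobuf/proto"
)

// Foo stands in for a generated message; only the methods needed to
// satisfy proto.Message are stubbed out.
type Foo struct{}

func (*Foo) Reset()         {}
func (*Foo) String() string { return "Foo{}" }
func (*Foo) ProtoMessage()  {}

func main() {
	proto.RegisterType(&Foo{}, "example.Foo") // normally done by generated code
	fmt.Println(proto.MessageName(&Foo{}))    // example.Foo
	fmt.Println(proto.MessageType("example.Foo") == reflect.TypeOf(&Foo{})) // true
}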
+func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go new file mode 100644 index 0000000..b6b7176 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -0,0 +1,111 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
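The file that follows supplies the gogo-specific coders selected by the customtype=, stdtime, and stdduration tag options parsed earlier. For orientation, a hypothetical generated struct using the std* options might look like this; the field numbers and names are invented.

// Hypothetical generated fields using gogo's std* tag options.
package example

import "time"

type Event struct {
	// stdtime: a google.protobuf.Timestamp on the wire, a native
	// time.Time in Go.
	At time.Time `protobuf:"bytes,1,opt,name=at,stdtime" json:"at"`
	// stdduration: a google.protobuf.Duration on the wire, a native
	// time.Duration in Go.
	TTL time.Duration `protobuf:"bytes,2,opt,name=ttl,stdduration" json:"ttl"`
}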
+ +package proto + +import ( + "fmt" + "os" + "reflect" +) + +func (p *Properties) setCustomEncAndDec(typ reflect.Type) { + p.ctype = typ + if p.Repeated { + p.enc = (*Buffer).enc_custom_slice_bytes + p.dec = (*Buffer).dec_custom_slice_bytes + p.size = size_custom_slice_bytes + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_custom_bytes + p.dec = (*Buffer).dec_custom_bytes + p.size = size_custom_bytes + } else { + p.enc = (*Buffer).enc_custom_ref_bytes + p.dec = (*Buffer).dec_custom_ref_bytes + p.size = size_custom_ref_bytes + } +} + +func (p *Properties) setDurationEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_duration + p.dec = (*Buffer).dec_slice_duration + p.size = size_slice_duration + } else { + p.enc = (*Buffer).enc_slice_ref_duration + p.dec = (*Buffer).dec_slice_ref_duration + p.size = size_slice_ref_duration + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_duration + p.dec = (*Buffer).dec_duration + p.size = size_duration + } else { + p.enc = (*Buffer).enc_ref_duration + p.dec = (*Buffer).dec_ref_duration + p.size = size_ref_duration + } +} + +func (p *Properties) setTimeEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_time + p.dec = (*Buffer).dec_slice_time + p.size = size_slice_time + } else { + p.enc = (*Buffer).enc_slice_ref_time + p.dec = (*Buffer).dec_slice_ref_time + p.size = size_slice_ref_time + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_time + p.dec = (*Buffer).dec_time + p.size = size_time + } else { + p.enc = (*Buffer).enc_ref_time + p.dec = (*Buffer).dec_ref_time + p.size = size_ref_time + } + +} + +func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { + t2 := typ.Elem() + p.sstype = typ + p.stype = t2 + p.isMarshaler = isMarshaler(t2) + p.isUnmarshaler = isUnmarshaler(t2) + p.enc = (*Buffer).enc_slice_ref_struct_message + p.dec = (*Buffer).dec_slice_ref_struct_message + p.size = size_slice_ref_struct_message + if p.Wire != "bytes" { + fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T \n", typ, t2) + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go new file mode 100644 index 0000000..5a5fd93 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -0,0 +1,119 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "io" +) + +func Skip(data []byte) (n int, err error) { + l := len(data) + index := 0 + for index < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + index++ + if data[index-1] < 0x80 { + break + } + } + return index, nil + case 1: + index += 8 + return index, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + index += length + return index, nil + case 3: + for { + var innerWire uint64 + var start int = index + for shift := uint(0); ; shift += 7 { + if index >= l { + return 0, io.ErrUnexpectedEOF + } + b := data[index] + index++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := Skip(data[start:]) + if err != nil { + return 0, err + } + index = start + next + } + return index, nil + case 4: + return index, nil + case 5: + index += 4 + return index, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go new file mode 100644 index 0000000..952f977 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -0,0 +1,928 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. + +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" + "sync" + "time" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + gtNewline = []byte(">\n") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. + return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +// raw is the interface satisfied by RawMessage. +type raw interface { + Bytes() []byte +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. 
+ if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, v, props); err != nil { + return err + } + } else if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. + keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.mvalprop); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if props.proto3 && fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. + if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if b, ok := fv.Interface().(raw); ok { + if err := writeRaw(w, b.Bytes()); err != nil { + return err + } + continue + } + + if len(props.Enum) > 0 { + if err := tm.writeEnum(w, fv, props); err != nil { + return err + } + } else if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). 
+	pv := sv
+	if pv.CanAddr() {
+		pv = sv.Addr()
+	} else {
+		pv = reflect.New(sv.Type())
+		pv.Elem().Set(sv)
+	}
+	if pv.Type().Implements(extensionRangeType) {
+		if err := tm.writeExtensions(w, pv); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+	if err := w.WriteByte('<'); err != nil {
+		return err
+	}
+	if !w.compact {
+		if err := w.WriteByte('\n'); err != nil {
+			return err
+		}
+	}
+	w.indent()
+	if err := writeUnknownStruct(w, b); err != nil {
+		return err
+	}
+	w.unindent()
+	if err := w.WriteByte('>'); err != nil {
+		return err
+	}
+	return nil
+}
+
+// writeAny writes an arbitrary field.
+func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+	v = reflect.Indirect(v)
+
+	if props != nil {
+		if len(props.CustomType) > 0 {
+			custom, ok := v.Interface().(Marshaler)
+			if ok {
+				data, err := custom.Marshal()
+				if err != nil {
+					return err
+				}
+				if err := writeString(w, string(data)); err != nil {
+					return err
+				}
+				return nil
+			}
+		} else if props.StdTime {
+			t, ok := v.Interface().(time.Time)
+			if !ok {
+				return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface())
+			}
+			tproto, err := timestampProto(t)
+			if err != nil {
+				return err
+			}
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdTime = false
+			err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy)
+			return err
+		} else if props.StdDuration {
+			d, ok := v.Interface().(time.Duration)
+			if !ok {
+				return fmt.Errorf("stdduration is not time.Duration, but %T", v.Interface())
+			}
+			dproto := durationProto(d)
+			propsCopy := *props // Make a copy so that this is goroutine-safe
+			propsCopy.StdDuration = false
+			err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy)
+			return err
+		}
+	}
+
+	// Floats have special cases.
+	if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+		x := v.Float()
+		var b []byte
+		switch {
+		case math.IsInf(x, 1):
+			b = posInf
+		case math.IsInf(x, -1):
+			b = negInf
+		case math.IsNaN(x):
+			b = nan
+		}
+		if b != nil {
+			_, err := w.Write(b)
+			return err
+		}
+		// Other values are handled below.
+	}
+
+	// We don't attempt to serialise every possible value type; only those
+	// that can occur in protocol buffers.
+	switch v.Kind() {
+	case reflect.Slice:
+		// Should only be a []byte; repeated fields are handled in writeStruct.
+		if err := writeString(w, string(v.Bytes())); err != nil {
+			return err
+		}
+	case reflect.String:
+		if err := writeString(w, v.String()); err != nil {
+			return err
+		}
+	case reflect.Struct:
+		// Required/optional group/message.
+		var bra, ket byte = '<', '>'
+		if props != nil && props.Wire == "group" {
+			bra, ket = '{', '}'
+		}
+		if err := w.WriteByte(bra); err != nil {
+			return err
+		}
+		if !w.compact {
+			if err := w.WriteByte('\n'); err != nil {
+				return err
+			}
+		}
+		w.indent()
+		if etm, ok := v.Interface().(encoding.TextMarshaler); ok {
+			text, err := etm.MarshalText()
+			if err != nil {
+				return err
+			}
+			if _, err = w.Write(text); err != nil {
+				return err
+			}
+		} else if err := tm.writeStruct(w, v); err != nil {
+			return err
+		}
+		w.unindent()
+		if err := w.WriteByte(ket); err != nil {
+			return err
+		}
+	default:
+		_, err := fmt.Fprint(w, v.Interface())
+		return err
+	}
+	return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+	return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. +// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, ferr := fmt.Fprintf(w, "/* %v */\n", err) + return ferr + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, werr := w.Write(endBraceNewline); werr != nil { + return werr + } + continue + } + if _, ferr := fmt.Fprint(w, tag); ferr != nil { + return ferr + } + if wire != WireStartGroup { + if err = w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err = w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. 
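Before the extension writer below, a rough standalone equivalent of writeString's escaping rules, trimmed to a subset of the named escapes (the real function also handles \r and \t; apostrophes are deliberately left unescaped in both).

// Rough sketch of the text-format byte escaping used by writeString.
package main

import "fmt"

func escape(s string) string {
	out := []byte{'"'}
	for i := 0; i < len(s); i++ { // loop over bytes, not runes
		switch c := s[i]; c {
		case '\n':
			out = append(out, '\\', 'n')
		case '"':
			out = append(out, '\\', '"')
		case '\\':
			out = append(out, '\\', '\\')
		default:
			if c >= 0x20 && c < 0x7f { // isprint
				out = append(out, c)
			} else {
				out = append(out, fmt.Sprintf("\\%03o", c)...) // octal escape
			}
		}
	}
	return string(append(out, '"'))
}

func main() {
	fmt.Println(escape("a'b\n\x01")) // "a'b\n\001"
}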
+func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + e := pv.Interface().(Message) + + var m map[int32]Extension + var mu sync.Locker + if em, ok := e.(extensionsBytes); ok { + eb := em.GetExtensions() + var err error + m, err = BytesToExtensionsMap(*eb) + if err != nil { + return err + } + mu = notLocker{} + } else if _, ok := e.(extendableProto); ok { + ep, _ := extendable(e) + m, mu = ep.extensionsRead() + if m == nil { + return nil + } + } + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(e, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. + if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. + v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. 
+func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go new file mode 100644 index 0000000..1d6c6aa --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -0,0 +1,57 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
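The file that follows adds enum-aware text output: writeEnum prefers the name registered for an enum value and falls back to writeAny's numeric form. The registration side, normally emitted by the code generator, looks roughly like this; the enum name and values are invented.

// Hypothetical enum registration feeding enumStringMaps below.
package main

import "github.com/gogo/protobuf/proto"

func main() {
	names := map[int32]string{0: "UNKNOWN", 1: "STARTED", 2: "DONE"}
	values := map[string]int32{"UNKNOWN": 0, "STARTED": 1, "DONE": 2}
	// Generated code calls this from an init function.
	proto.RegisterEnum("example.State", names, values)
	// With the maps registered, a field tagged enum=example.State renders
	// as `state: STARTED` rather than `state: 1`.
}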
+
+package proto
+
+import (
+	"fmt"
+	"reflect"
+)
+
+func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error {
+	m, ok := enumStringMaps[props.Enum]
+	if !ok {
+		// No name map registered for this enum; the numeric form
+		// written by writeAny is all we can produce.
+		if err := tm.writeAny(w, v, props); err != nil {
+			return err
+		}
+		return nil
+	}
+	key := int32(0)
+	if v.Kind() == reflect.Ptr {
+		key = int32(v.Elem().Int())
+	} else {
+		key = int32(v.Int())
+	}
+	s, ok := m[key]
+	if !ok {
+		// The value has no registered name; fall back to the numeric form.
+		if err := tm.writeAny(w, v, props); err != nil {
+			return err
+		}
+		return nil
+	}
+	_, err := fmt.Fprint(w, s)
+	return err
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go
new file mode 100644
index 0000000..f127672
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go
@@ -0,0 +1,1013 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2013, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
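Before the parser internals, a hypothetical round-trip pairing this parser with the writer from text.go; msg and out stand in for two values of the same generated message type, and UnmarshalText is assumed to be the package-level entry point built on this parser.

// Hypothetical text-format round-trip (writer + parser).
package main

import (
	"fmt"

	"github.com/gogo/protobuf/proto"
)

func roundTrip(msg, out proto.Message) error {
	text := proto.MarshalTextString(msg) // writer side, text.go
	if err := proto.UnmarshalText(text, out); err != nil { // parser side
		return fmt.Errorf("re-parse failed: %v", err)
	}
	return nil
}

func main() {}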
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += 
len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") + errBadHex = errors.New("proto: bad hexadecimal") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. + simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + base := 8 + ss := s[:2] + s = s[2:] + if r == 'x' || r == 'X' { + base = 16 + } else { + ss = string(r) + ss + } + i, err := strconv.ParseUint(ss, base, 8) + if err != nil { + return "", "", err + } + return string([]byte{byte(i)}), s, nil + case 'u', 'U': + n := 4 + if r == 'U' { + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d digits`, r, n) + } + + bs := make([]byte, n/2) + for i := 0; i < n; i += 2 { + a, ok1 := unhex(s[i]) + b, ok2 := unhex(s[i+1]) + if !ok1 || !ok2 { + return "", "", errBadHex + } + bs[i/2] = a<<4 | b + } + s = s[n:] + return string(bs), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Adapted from src/pkg/strconv/quote.go. +func unhex(b byte) (v byte, ok bool) { + switch { + case '0' <= b && b <= '9': + return b - '0', true + case 'a' <= b && b <= 'f': + return b - 'a' + 10, true + case 'A' <= b && b <= 'F': + return b - 'A' + 10, true + } + return 0, false +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. 
+ cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. +func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. + for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.mkeyprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.mvalprop, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.mvalprop); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. + fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
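+// For example, `foo: 1; bar: 2`, `foo: 1, bar: 2` and `foo: 1 bar: 2`
+// all parse identically.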
+func (p *textParser) consumeOptionalSeparator() error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value != ";" && tok.value != "," {
+		p.back()
+	}
+	return nil
+}
+
+func (p *textParser) readAny(v reflect.Value, props *Properties) error {
+	tok := p.next()
+	if tok.err != nil {
+		return tok.err
+	}
+	if tok.value == "" {
+		return p.errorf("unexpected EOF")
+	}
+	if len(props.CustomType) > 0 {
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				tc := reflect.TypeOf(new(Marshaler))
+				ok := t.Elem().Implements(tc.Elem())
+				if ok {
+					fv := v
+					flen := fv.Len()
+					if flen == fv.Cap() {
+						nav := reflect.MakeSlice(v.Type(), flen, 2*flen+1)
+						reflect.Copy(nav, fv)
+						fv.Set(nav)
+					}
+					fv.SetLen(flen + 1)
+
+					// Read one.
+					p.back()
+					return p.readAny(fv.Index(flen), props)
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			custom := reflect.New(props.ctype.Elem()).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.ValueOf(custom))
+		} else {
+			custom := reflect.New(reflect.TypeOf(v.Interface())).Interface().(Unmarshaler)
+			err := custom.Unmarshal([]byte(tok.unquoted))
+			if err != nil {
+				return p.errorf("%v %v: %v", err, v.Type(), tok.value)
+			}
+			v.Set(reflect.Indirect(reflect.ValueOf(custom)))
+		}
+		return nil
+	}
+	if props.StdTime {
+		fv := v
+		p.back()
+		props.StdTime = false
+		tproto := &timestamp{}
+		err := p.readAny(reflect.ValueOf(tproto).Elem(), props)
+		props.StdTime = true
+		if err != nil {
+			return err
+		}
+		tim, err := timestampFromProto(tproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ts := fv.Interface().([]*time.Time)
+					ts = append(ts, &tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				} else {
+					ts := fv.Interface().([]time.Time)
+					ts = append(ts, tim)
+					fv.Set(reflect.ValueOf(ts))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&tim))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&tim)))
+		}
+		return nil
+	}
+	if props.StdDuration {
+		fv := v
+		p.back()
+		props.StdDuration = false
+		dproto := &duration{}
+		err := p.readAny(reflect.ValueOf(dproto).Elem(), props)
+		props.StdDuration = true
+		if err != nil {
+			return err
+		}
+		dur, err := durationFromProto(dproto)
+		if err != nil {
+			return err
+		}
+		if props.Repeated {
+			t := reflect.TypeOf(v.Interface())
+			if t.Kind() == reflect.Slice {
+				if t.Elem().Kind() == reflect.Ptr {
+					ds := fv.Interface().([]*time.Duration)
+					ds = append(ds, &dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				} else {
+					ds := fv.Interface().([]time.Duration)
+					ds = append(ds, dur)
+					fv.Set(reflect.ValueOf(ds))
+					return nil
+				}
+			}
+		}
+		if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr {
+			v.Set(reflect.ValueOf(&dur))
+		} else {
+			v.Set(reflect.Indirect(reflect.ValueOf(&dur)))
+		}
+		return nil
+	}
+	switch fv := v; fv.Kind() {
+	case reflect.Slice:
+		at := v.Type()
+		if at.Elem().Kind() == reflect.Uint8 {
+			// Special case for []byte
+			if tok.value[0] != '"' && tok.value[0] != '\'' {
+				// Deliberately written out here, as the error after
+				// this switch statement would write "invalid []byte: ...",
+				// which is not as user-friendly.
+ return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + ntok := p.next() + if ntok.err != nil { + return ntok.err + } + if ntok.value == "]" { + break + } + if ntok.value != "," { + return p.errorf("Expected ']' or ',' found %q", ntok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". + if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(x) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + err := um.UnmarshalText([]byte(s)) + return err + } + pb.Reset() + v := reflect.ValueOf(pb) + if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil { + return pe + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 0000000..9324f65 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
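+// For example, a time.Time in the year 20000 has no Timestamp
+// representation: its Unix seconds value is at least maxValidSeconds.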
+func validateTimestamp(ts *timestamp) error {
+	if ts == nil {
+		return errors.New("timestamp: nil Timestamp")
+	}
+	if ts.Seconds < minValidSeconds {
+		return fmt.Errorf("timestamp: %#v before 0001-01-01", ts)
+	}
+	if ts.Seconds >= maxValidSeconds {
+		return fmt.Errorf("timestamp: %#v after 10000-01-01", ts)
+	}
+	if ts.Nanos < 0 || ts.Nanos >= 1e9 {
+		return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts)
+	}
+	return nil
+}
+
+// timestampFromProto converts a google.protobuf.Timestamp proto to a time.Time.
+// It returns an error if the argument is invalid.
+//
+// Unlike most Go functions, if timestampFromProto returns an error, the first
+// return value is not the zero time.Time. Instead, it is the value obtained from the
+// time.Unix function when passed the contents of the Timestamp, in the UTC
+// locale. This may or may not be a meaningful time; many invalid Timestamps
+// do map to valid time.Times.
+//
+// A nil Timestamp returns an error. The first return value in that case is
+// undefined.
+func timestampFromProto(ts *timestamp) (time.Time, error) {
+	// Don't return the zero value on error, because it corresponds to a valid
+	// timestamp. Instead return whatever time.Unix gives us.
+	var t time.Time
+	if ts == nil {
+		t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
+	} else {
+		t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
+	}
+	return t, validateTimestamp(ts)
+}
+
+// timestampProto converts a time.Time to a google.protobuf.Timestamp proto.
+// It returns an error if the resulting Timestamp is invalid.
+func timestampProto(t time.Time) (*timestamp, error) {
+	seconds := t.Unix()
+	nanos := int32(t.Sub(time.Unix(seconds, 0)))
+	ts := &timestamp{
+		Seconds: seconds,
+		Nanos:   nanos,
+	}
+	if err := validateTimestamp(ts); err != nil {
+		return nil, err
+	}
+	return ts, nil
+}
diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
new file mode 100644
index 0000000..d427647
--- /dev/null
+++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go
@@ -0,0 +1,229 @@
+// Protocol Buffers for Go with Gadgets
+//
+// Copyright (c) 2016, The GoGo Authors. All rights reserved.
+// http://github.com/gogo/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+	"reflect"
+	"time"
+)
+
+var timeType = reflect.TypeOf((*time.Time)(nil)).Elem()
+
+type timestamp struct {
+	Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"`
+	Nanos   int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"`
+}
+
+func (m *timestamp) Reset()       { *m = timestamp{} }
+func (*timestamp) ProtoMessage()  {}
+func (*timestamp) String() string { return "timestamp<string>" }
+
+func init() {
+	RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp")
+}
+
+func (o *Buffer) decTimestamp() (time.Time, error) {
+	b, err := o.DecodeRawBytes(true)
+	if err != nil {
+		return time.Time{}, err
+	}
+	tproto := &timestamp{}
+	if err := Unmarshal(b, tproto); err != nil {
+		return time.Time{}, err
+	}
+	return timestampFromProto(tproto)
+}
+
+func (o *Buffer) dec_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setPtrCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	setCustomType(base, p.field, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType)))
+	var zero field
+	setPtrCustomType(newBas, zero, &t)
+	return nil
+}
+
+func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error {
+	t, err := o.decTimestamp()
+	if err != nil {
+		return err
+	}
+	newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType))
+	var zero field
+	setCustomType(newBas, zero, &t)
+	return nil
+}
+
+func size_time(p *Properties, base structPointer) (n int) {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return 0
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return 0
+	}
+	size := Size(t)
+	return size + sizeVarint(uint64(size)) + len(p.tagcode)
+}
+
+func (o *Buffer) enc_time(p *Properties, base structPointer) error {
+	structp := structPointer_GetStructPointer(base, p.field)
+	if structPointer_IsNil(structp) {
+		return ErrNil
+	}
+	tim := structPointer_Interface(structp, timeType).(*time.Time)
+	t, err := timestampProto(*tim)
+	if err != nil {
+		return err
+	}
+	data, err := Marshal(t)
+	if err != nil {
+		return err
+	}
+	o.buf = append(o.buf, p.tagcode...)
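+	// The tag bytes are followed by a length-delimited payload:
+	// EncodeRawBytes writes the marshaled Timestamp preceded by its
+	// varint-encoded length.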
+ o.EncodeRawBytes(data) + return nil +} + +func size_ref_time(p *Properties, base structPointer) (n int) { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_slice_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return 0 + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return errRepeatedHasNil + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/golang/snappy/decode.go b/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 0000000..72efb03 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. 
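+// The length is read from the varint header at the start of src, so src
+// need only contain that header rather than the whole encoded block.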
+func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. +func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. +func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). 
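+			// The chunk body is a 4-byte little-endian checksum of the
+			// decoded bytes, followed by the compressed data itself.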
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/vendor/github.com/golang/snappy/decode_amd64.go b/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 0000000..fcd192b --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. +// +//go:noescape +func decode(dst, src []byte) int diff --git a/vendor/github.com/golang/snappy/decode_amd64.s b/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 0000000..e6179f6 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. 
The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. + // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. 
+ MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. + ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. 
+	//
+	// We assume that:
+	// - CX == length && CX > 0
+	// - DX == offset
+
+	// if offset <= 0 { etc }
+	CMPQ DX, $0
+	JLE errCorrupt
+
+	// if d < offset { etc }
+	MOVQ DI, BX
+	SUBQ R8, BX
+	CMPQ BX, DX
+	JLT errCorrupt
+
+	// if length > len(dst)-d { etc }
+	MOVQ R10, BX
+	SUBQ DI, BX
+	CMPQ CX, BX
+	JGT errCorrupt
+
+	// forwardCopy(dst[d:d+length], dst[d-offset:]); d += length
+	//
+	// Set:
+	// - R14 = len(dst)-d
+	// - R15 = &dst[d-offset]
+	MOVQ R10, R14
+	SUBQ DI, R14
+	MOVQ DI, R15
+	SUBQ DX, R15
+
+	// !!! Try a faster technique for short (16 or fewer bytes) forward copies.
+	//
+	// First, try using two 8-byte load/stores, similar to the doLit technique
+	// above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is
+	// still OK if offset >= 8. Note that this has to be two 8-byte load/stores
+	// and not one 16-byte load/store, and the first store has to be before the
+	// second load, due to the overlap if offset is in the range [8, 16).
+	//
+	// if length > 16 || offset < 8 || len(dst)-d < 16 {
+	//   goto slowForwardCopy
+	// }
+	// copy 16 bytes
+	// d += length
+	CMPQ CX, $16
+	JGT slowForwardCopy
+	CMPQ DX, $8
+	JLT slowForwardCopy
+	CMPQ R14, $16
+	JLT slowForwardCopy
+	MOVQ 0(R15), AX
+	MOVQ AX, 0(DI)
+	MOVQ 8(R15), BX
+	MOVQ BX, 8(DI)
+	ADDQ CX, DI
+	JMP loop
+
+slowForwardCopy:
+	// !!! If the forward copy is longer than 16 bytes, or if offset < 8, we
+	// can still try 8-byte load stores, provided we can overrun up to 10 extra
+	// bytes. As above, the overrun will be fixed up by subsequent iterations
+	// of the outermost loop.
+	//
+	// The C++ snappy code calls this technique IncrementalCopyFastPath. Its
+	// commentary says:
+	//
+	// ----
+	//
+	// The main part of this loop is a simple copy of eight bytes at a time
+	// until we've copied (at least) the requested amount of bytes. However,
+	// if d and d-offset are less than eight bytes apart (indicating a
+	// repeating pattern of length < 8), we first need to expand the pattern in
+	// order to get the correct results. For instance, if the buffer looks like
+	// this, with the eight-byte <d-offset, d-offset+8> and <d, d+8> patterns
+	// marked as intervals:
+	//
+	// abxxxxxxxxxxxx
+	// [------]           d-offset
+	//   [------]         d
+	//
+	// a single eight-byte copy from <d-offset, d-offset+8> to <d, d+8> will
+	// repeat the pattern once, after which we can move <d> two bytes without
+	// moving <d-offset>:
+	//
+	// ababxxxxxxxxxx
+	// [------]           d-offset
+	//     [------]       d
+	//
+	// and repeat the exercise until the two no longer overlap.
+	//
+	// This allows us to do very well in the special case of one single byte
+	// repeated many times, without taking a big hit for more general cases.
+	//
+	// The worst case of extra writing past the end of the match occurs when
+	// offset == 1 and length == 1; the last copy will read from byte positions
+	// [0..7] and write to [4..11], whereas it was only supposed to write to
+	// position 1. Thus, ten excess bytes.
+	//
+	// ----
+	//
+	// That "10 byte overrun" worst case is confirmed by Go's
+	// TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy
+	// and finishSlowForwardCopy algorithm.
+	//
+	// if length > len(dst)-d-10 {
+	//   goto verySlowForwardCopy
+	// }
+	SUBQ $10, R14
+	CMPQ CX, R14
+	JGT verySlowForwardCopy
+
+makeOffsetAtLeast8:
+	// !!! As above, expand the pattern so that offset >= 8 and we can use
+	// 8-byte load/stores.
+ // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". + // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/decode_other.go b/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 0000000..8c9f204 --- /dev/null +++ b/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/vendor/github.com/golang/snappy/encode.go b/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 0000000..8d393e9 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. 
That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. +// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. 
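+//
+// A minimal sketch of the buffered pattern recommended above (dst and
+// data are placeholders; error handling elided):
+//
+//	bw := NewBufferedWriter(dst)
+//	bw.Write(data)
+//	bw.Close()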
+func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. + wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. + compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. 
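+		// Header layout: 1 byte of chunk type, a 3-byte little-endian
+		// chunk length (counting the 4 checksum bytes plus the body),
+		// then the 4-byte little-endian checksum itself.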
+ w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/vendor/github.com/golang/snappy/encode_amd64.go b/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 0000000..150d91b --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/vendor/github.com/golang/snappy/encode_amd64.s b/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 0000000..adfd979 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". + +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. 
The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
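+//
+// A worked example of the semantics (illustrative, mirroring the pure Go
+// version in encode_other.go): for src = "abcdabcdX", extendMatch(src, 0, 4)
+// compares src[0:] against src[4:], matches the four bytes "abcd", stops at
+// 'a' versus 'X', and returns 8.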
+TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. + // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. 
This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
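+	// (The elided prose, from encode_other.go: we could immediately start
+	// working at s now, but to improve compression we first update the hash
+	// table at s-1 and at s.)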
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". + ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/vendor/github.com/golang/snappy/encode_other.go b/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 0000000..dbcae90 --- /dev/null +++ b/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. 
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= len(lit) && len(lit) <= 65536
+func emitLiteral(dst, lit []byte) int {
+	i, n := 0, uint(len(lit)-1)
+	switch {
+	case n < 60:
+		dst[0] = uint8(n)<<2 | tagLiteral
+		i = 1
+	case n < 1<<8:
+		dst[0] = 60<<2 | tagLiteral
+		dst[1] = uint8(n)
+		i = 2
+	default:
+		dst[0] = 61<<2 | tagLiteral
+		dst[1] = uint8(n)
+		dst[2] = uint8(n >> 8)
+		i = 3
+	}
+	return i + copy(dst[i:], lit)
+}
+
+// emitCopy writes a copy chunk and returns the number of bytes written.
+//
+// It assumes that:
+//	dst is long enough to hold the encoded bytes
+//	1 <= offset && offset <= 65535
+//	4 <= length && length <= 65535
+func emitCopy(dst []byte, offset, length int) int {
+	i := 0
+	// The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The
+	// threshold for this loop is a little higher (at 68 = 64 + 4), and the
+	// length emitted down below is a little lower (at 60 = 64 - 4), because
+	// it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed
+	// by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as
+	// a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as
+	// 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a
+	// tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an
+	// encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1.
+	for length >= 68 {
+		// Emit a length 64 copy, encoded as 3 bytes.
+		dst[i+0] = 63<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 64
+	}
+	if length > 64 {
+		// Emit a length 60 copy, encoded as 3 bytes.
+		dst[i+0] = 59<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		i += 3
+		length -= 60
+	}
+	if length >= 12 || offset >= 2048 {
+		// Emit the remaining copy, encoded as 3 bytes.
+		dst[i+0] = uint8(length-1)<<2 | tagCopy2
+		dst[i+1] = uint8(offset)
+		dst[i+2] = uint8(offset >> 8)
+		return i + 3
+	}
+	// Emit the remaining copy, encoded as 2 bytes.
+	dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1
+	dst[i+1] = uint8(offset)
+	return i + 2
+}
+
+// extendMatch returns the largest k such that k <= len(src) and that
+// src[i:i+k-j] and src[j:k] have the same contents.
+//
+// It assumes that:
+//	0 <= i && i < j && j <= len(src)
+func extendMatch(src []byte, i, j int) int {
+	for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
+	}
+	return j
+}
+
+func hash(u, shift uint32) uint32 {
+	return (u * 0x1e35a7bd) >> shift
+}
+
+// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
+// assumes that the varint-encoded length of the decompressed bytes has already
+// been written.
+//
+// It also assumes that:
+//	len(dst) >= MaxEncodedLen(len(src)) &&
+//	minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
+func encodeBlock(dst, src []byte) (d int) {
+	// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
+	// The table element type is uint16, as s < sLimit and sLimit < len(src)
+	// and len(src) <= maxBlockSize and maxBlockSize == 65536.
+	const (
+		maxTableSize = 1 << 14
+		// tableMask is redundant, but helps the compiler eliminate bounds
+		// checks.
+		tableMask = maxTableSize - 1
+	)
+	shift := uint32(32 - 8)
+	for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 {
+		shift--
+	}
+	// In Go, all array elements are zero-initialized, so there is no advantage
+	// to a smaller tableSize per se.
However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. + s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. 
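+			// (Concretely, since x spans src[s-1 : s+7] in little-endian
+			// order: uint32(x>>0) == load32(src, s-1), uint32(x>>8) ==
+			// load32(src, s), and uint32(x>>16) == load32(src, s+1).)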
+ x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/vendor/github.com/golang/snappy/snappy.go b/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 0000000..0cf5e37 --- /dev/null +++ b/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,87 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the snappy block-based compression format. +// It aims for very high speeds and reasonable compression. +// +// The C++ snappy implementation is at https://github.com/google/snappy +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
+	maxEncodedLenOfMaxBlockSize = 76490
+
+	obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize
+	obufLen       = obufHeaderLen + maxEncodedLenOfMaxBlockSize
+)
+
+const (
+	chunkTypeCompressedData   = 0x00
+	chunkTypeUncompressedData = 0x01
+	chunkTypePadding          = 0xfe
+	chunkTypeStreamIdentifier = 0xff
+)
+
+var crcTable = crc32.MakeTable(crc32.Castagnoli)
+
+// crc implements the checksum specified in section 3 of
+// https://github.com/google/snappy/blob/master/framing_format.txt
+func crc(b []byte) uint32 {
+	c := crc32.Update(0, crcTable, b)
+	return uint32(c>>15|c<<17) + 0xa282ead8
+}
diff --git a/vendor/github.com/google/gops/agent/agent.go b/vendor/github.com/google/gops/agent/agent.go
new file mode 100644
index 0000000..5708391
--- /dev/null
+++ b/vendor/github.com/google/gops/agent/agent.go
@@ -0,0 +1,237 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package agent provides hooks programs can register to retrieve
+// diagnostics data by using gops.
+package agent
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"net"
+	"os"
+	gosignal "os/signal"
+	"runtime"
+	"runtime/pprof"
+	"runtime/trace"
+	"strconv"
+	"sync"
+	"time"
+
+	"bufio"
+
+	"github.com/google/gops/internal"
+	"github.com/google/gops/signal"
+	"github.com/kardianos/osext"
+)
+
+const defaultAddr = "127.0.0.1:0"
+
+var (
+	mu       sync.Mutex
+	portfile string
+	listener net.Listener
+
+	units = []string{" bytes", "KB", "MB", "GB", "TB", "PB"}
+)
+
+// Options allows configuring the started agent.
+type Options struct {
+	// Addr is the host:port the agent will be listening at.
+	// Optional.
+	Addr string
+
+	// NoShutdownCleanup tells the agent not to automatically clean up
+	// resources if the running process receives an interrupt.
+	// Optional.
+	NoShutdownCleanup bool
+}
+
+// Listen starts the gops agent on a host process. Once the agent has started,
+// users can use the advanced gops features. The agent listens for Interrupt
+// signals and exits the process; if you need to perform further work on the
+// Interrupt signal, use the options parameter to configure the agent
+// accordingly.
+//
+// Note: The agent exposes an endpoint via a TCP connection that can be used by
+// any program on the system. Review your security requirements before starting
+// the agent.
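+//
+// A minimal usage sketch (illustrative only; the log package and the error
+// handling are assumptions, not part of this file):
+//
+//	if err := agent.Listen(nil); err != nil {
+//		log.Fatal(err)
+//	}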
+func Listen(opts *Options) error { + mu.Lock() + defer mu.Unlock() + + if opts == nil { + opts = &Options{} + } + if portfile != "" { + return fmt.Errorf("gops: agent already listening at: %v", listener.Addr()) + } + + gopsdir, err := internal.ConfigDir() + if err != nil { + return err + } + err = os.MkdirAll(gopsdir, os.ModePerm) + if err != nil { + return err + } + if !opts.NoShutdownCleanup { + gracefulShutdown() + } + + addr := opts.Addr + if addr == "" { + addr = defaultAddr + } + ln, err := net.Listen("tcp", addr) + if err != nil { + return err + } + listener = ln + port := listener.Addr().(*net.TCPAddr).Port + portfile = fmt.Sprintf("%s/%d", gopsdir, os.Getpid()) + err = ioutil.WriteFile(portfile, []byte(strconv.Itoa(port)), os.ModePerm) + if err != nil { + return err + } + + go listen() + return nil +} + +func listen() { + buf := make([]byte, 1) + for { + fd, err := listener.Accept() + if err != nil { + fmt.Fprintf(os.Stderr, "gops: %v", err) + if netErr, ok := err.(net.Error); ok && !netErr.Temporary() { + break + } + continue + } + if _, err := fd.Read(buf); err != nil { + fmt.Fprintf(os.Stderr, "gops: %v", err) + continue + } + if err := handle(fd, buf); err != nil { + fmt.Fprintf(os.Stderr, "gops: %v", err) + continue + } + fd.Close() + } +} + +func gracefulShutdown() { + c := make(chan os.Signal, 1) + gosignal.Notify(c, os.Interrupt) + go func() { + // cleanup the socket on shutdown. + <-c + Close() + os.Exit(1) + }() +} + +// Close closes the agent, removing temporary files and closing the TCP listener. +// If no agent is listening, Close does nothing. +func Close() { + mu.Lock() + defer mu.Unlock() + + if portfile != "" { + os.Remove(portfile) + portfile = "" + } + if listener != nil { + listener.Close() + } +} + +func formatBytes(val uint64) string { + var i int + var target uint64 + for i = range units { + target = 1 << uint(10*(i+1)) + if val < target { + break + } + } + if i > 0 { + return fmt.Sprintf("%0.2f%s (%d bytes)", float64(val)/(float64(target)/1024), units[i], val) + } + return fmt.Sprintf("%d bytes", val) +} + +func handle(conn io.Writer, msg []byte) error { + switch msg[0] { + case signal.StackTrace: + return pprof.Lookup("goroutine").WriteTo(conn, 2) + case signal.GC: + runtime.GC() + _, err := conn.Write([]byte("ok")) + return err + case signal.MemStats: + var s runtime.MemStats + runtime.ReadMemStats(&s) + fmt.Fprintf(conn, "alloc: %v\n", formatBytes(s.Alloc)) + fmt.Fprintf(conn, "total-alloc: %v\n", formatBytes(s.TotalAlloc)) + fmt.Fprintf(conn, "sys: %v\n", formatBytes(s.Sys)) + fmt.Fprintf(conn, "lookups: %v\n", s.Lookups) + fmt.Fprintf(conn, "mallocs: %v\n", s.Mallocs) + fmt.Fprintf(conn, "frees: %v\n", s.Frees) + fmt.Fprintf(conn, "heap-alloc: %v\n", formatBytes(s.HeapAlloc)) + fmt.Fprintf(conn, "heap-sys: %v\n", formatBytes(s.HeapSys)) + fmt.Fprintf(conn, "heap-idle: %v\n", formatBytes(s.HeapIdle)) + fmt.Fprintf(conn, "heap-in-use: %v\n", formatBytes(s.HeapInuse)) + fmt.Fprintf(conn, "heap-released: %v\n", formatBytes(s.HeapReleased)) + fmt.Fprintf(conn, "heap-objects: %v\n", s.HeapObjects) + fmt.Fprintf(conn, "stack-in-use: %v\n", formatBytes(s.StackInuse)) + fmt.Fprintf(conn, "stack-sys: %v\n", formatBytes(s.StackSys)) + fmt.Fprintf(conn, "next-gc: when heap-alloc >= %v\n", formatBytes(s.NextGC)) + lastGC := "-" + if s.LastGC != 0 { + lastGC = fmt.Sprint(time.Unix(0, int64(s.LastGC))) + } + fmt.Fprintf(conn, "last-gc: %v\n", lastGC) + fmt.Fprintf(conn, "gc-pause: %v\n", time.Duration(s.PauseTotalNs)) + fmt.Fprintf(conn, "num-gc: %v\n", s.NumGC) + 
fmt.Fprintf(conn, "enable-gc: %v\n", s.EnableGC)
+		fmt.Fprintf(conn, "debug-gc: %v\n", s.DebugGC)
+	case signal.Version:
+		fmt.Fprintf(conn, "%v\n", runtime.Version())
+	case signal.HeapProfile:
+		pprof.WriteHeapProfile(conn)
+	case signal.CPUProfile:
+		if err := pprof.StartCPUProfile(conn); err != nil {
+			return err
+		}
+		time.Sleep(30 * time.Second)
+		pprof.StopCPUProfile()
+	case signal.Stats:
+		fmt.Fprintf(conn, "goroutines: %v\n", runtime.NumGoroutine())
+		fmt.Fprintf(conn, "OS threads: %v\n", pprof.Lookup("threadcreate").Count())
+		fmt.Fprintf(conn, "GOMAXPROCS: %v\n", runtime.GOMAXPROCS(0))
+		fmt.Fprintf(conn, "num CPU: %v\n", runtime.NumCPU())
+	case signal.BinaryDump:
+		path, err := osext.Executable()
+		if err != nil {
+			return err
+		}
+		f, err := os.Open(path)
+		if err != nil {
+			return err
+		}
+		defer f.Close()
+
+		_, err = bufio.NewReader(f).WriteTo(conn)
+		return err
+	case signal.Trace:
+		trace.Start(conn)
+		time.Sleep(5 * time.Second)
+		trace.Stop()
+	}
+	return nil
+}
diff --git a/vendor/github.com/google/gops/internal/internal.go b/vendor/github.com/google/gops/internal/internal.go
new file mode 100644
index 0000000..1382822
--- /dev/null
+++ b/vendor/github.com/google/gops/internal/internal.go
@@ -0,0 +1,52 @@
+package internal
+
+import (
+	"errors"
+	"fmt"
+	"io/ioutil"
+	"os"
+	"os/user"
+	"path/filepath"
+	"runtime"
+	"strings"
+)
+
+func ConfigDir() (string, error) {
+	if runtime.GOOS == "windows" {
+		return filepath.Join(os.Getenv("APPDATA"), "gops"), nil
+	}
+	homeDir := guessUnixHomeDir()
+	if homeDir == "" {
+		return "", errors.New("unable to get current user home directory: os/user lookup failed; $HOME is empty")
+	}
+	return filepath.Join(homeDir, ".config", "gops"), nil
+}
+
+func guessUnixHomeDir() string {
+	usr, err := user.Current()
+	if err == nil {
+		return usr.HomeDir
+	}
+	return os.Getenv("HOME")
+}
+
+func PIDFile(pid int) (string, error) {
+	gopsdir, err := ConfigDir()
+	if err != nil {
+		return "", err
+	}
+	return fmt.Sprintf("%s/%d", gopsdir, pid), nil
+}
+
+func GetPort(pid int) (string, error) {
+	portfile, err := PIDFile(pid)
+	if err != nil {
+		return "", err
+	}
+	b, err := ioutil.ReadFile(portfile)
+	if err != nil {
+		return "", err
+	}
+	port := strings.TrimSpace(string(b))
+	return port, nil
+}
diff --git a/vendor/github.com/google/gops/signal/signal.go b/vendor/github.com/google/gops/signal/signal.go
new file mode 100644
index 0000000..b2bfbe1
--- /dev/null
+++ b/vendor/github.com/google/gops/signal/signal.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package signal contains signals used to communicate with the gops agents.
+package signal
+
+const (
+	// StackTrace represents a command to print a stack trace.
+	StackTrace = byte(0x1)
+
+	// GC runs the garbage collector.
+	GC = byte(0x2)
+
+	// MemStats reports memory stats.
+	MemStats = byte(0x3)
+
+	// Version prints the Go version.
+	Version = byte(0x4)
+
+	// HeapProfile starts `go tool pprof` with the current memory profile.
+	HeapProfile = byte(0x5)
+
+	// CPUProfile starts `go tool pprof` with the current CPU profile.
+	CPUProfile = byte(0x6)
+
+	// Stats returns Go runtime statistics such as the number of goroutines, GOMAXPROCS, and NumCPU.
+	Stats = byte(0x7)
+
+	// Trace starts the Go execution tracer, waits 5 seconds, and launches the trace tool.
+	Trace = byte(0x8)
+
+	// BinaryDump returns the running binary file.
+ BinaryDump = byte(0x9) +) diff --git a/vendor/github.com/gorilla/websocket/client.go b/vendor/github.com/gorilla/websocket/client.go new file mode 100644 index 0000000..43a87c7 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client.go @@ -0,0 +1,392 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "bytes" + "crypto/tls" + "encoding/base64" + "errors" + "io" + "io/ioutil" + "net" + "net/http" + "net/url" + "strings" + "time" +) + +// ErrBadHandshake is returned when the server response to opening handshake is +// invalid. +var ErrBadHandshake = errors.New("websocket: bad handshake") + +var errInvalidCompression = errors.New("websocket: invalid compression negotiation") + +// NewClient creates a new client connection using the given net connection. +// The URL u specifies the host and request URI. Use requestHeader to specify +// the origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies +// (Cookie). Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etc. +// +// Deprecated: Use Dialer instead. +func NewClient(netConn net.Conn, u *url.URL, requestHeader http.Header, readBufSize, writeBufSize int) (c *Conn, response *http.Response, err error) { + d := Dialer{ + ReadBufferSize: readBufSize, + WriteBufferSize: writeBufSize, + NetDial: func(net, addr string) (net.Conn, error) { + return netConn, nil + }, + } + return d.Dial(u.String(), requestHeader) +} + +// A Dialer contains options for connecting to WebSocket server. +type Dialer struct { + // NetDial specifies the dial function for creating TCP connections. If + // NetDial is nil, net.Dial is used. + NetDial func(network, addr string) (net.Conn, error) + + // Proxy specifies a function to return a proxy for a given + // Request. If the function returns a non-nil error, the + // request is aborted with the provided error. + // If Proxy is nil or returns a nil *URL, no proxy is used. + Proxy func(*http.Request) (*url.URL, error) + + // TLSClientConfig specifies the TLS configuration to use with tls.Client. + // If nil, the default configuration is used. + TLSClientConfig *tls.Config + + // HandshakeTimeout specifies the duration for the handshake to complete. + HandshakeTimeout time.Duration + + // ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer + // size is zero, then a useful default size is used. The I/O buffer sizes + // do not limit the size of the messages that can be sent or received. + ReadBufferSize, WriteBufferSize int + + // Subprotocols specifies the client's requested subprotocols. + Subprotocols []string + + // EnableCompression specifies if the client should attempt to negotiate + // per message compression (RFC 7692). Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool + + // Jar specifies the cookie jar. + // If Jar is nil, cookies are not sent in requests and ignored + // in responses. + Jar http.CookieJar +} + +var errMalformedURL = errors.New("malformed ws or wss URL") + +// parseURL parses the URL. 
+// +// This function is a replacement for the standard library url.Parse function. +// In Go 1.4 and earlier, url.Parse loses information from the path. +func parseURL(s string) (*url.URL, error) { + // From the RFC: + // + // ws-URI = "ws:" "//" host [ ":" port ] path [ "?" query ] + // wss-URI = "wss:" "//" host [ ":" port ] path [ "?" query ] + var u url.URL + switch { + case strings.HasPrefix(s, "ws://"): + u.Scheme = "ws" + s = s[len("ws://"):] + case strings.HasPrefix(s, "wss://"): + u.Scheme = "wss" + s = s[len("wss://"):] + default: + return nil, errMalformedURL + } + + if i := strings.Index(s, "?"); i >= 0 { + u.RawQuery = s[i+1:] + s = s[:i] + } + + if i := strings.Index(s, "/"); i >= 0 { + u.Opaque = s[i:] + s = s[:i] + } else { + u.Opaque = "/" + } + + u.Host = s + + if strings.Contains(u.Host, "@") { + // Don't bother parsing user information because user information is + // not allowed in websocket URIs. + return nil, errMalformedURL + } + + return &u, nil +} + +func hostPortNoPort(u *url.URL) (hostPort, hostNoPort string) { + hostPort = u.Host + hostNoPort = u.Host + if i := strings.LastIndex(u.Host, ":"); i > strings.LastIndex(u.Host, "]") { + hostNoPort = hostNoPort[:i] + } else { + switch u.Scheme { + case "wss": + hostPort += ":443" + case "https": + hostPort += ":443" + default: + hostPort += ":80" + } + } + return hostPort, hostNoPort +} + +// DefaultDialer is a dialer with all fields set to the default zero values. +var DefaultDialer = &Dialer{ + Proxy: http.ProxyFromEnvironment, +} + +// Dial creates a new client connection. Use requestHeader to specify the +// origin (Origin), subprotocols (Sec-WebSocket-Protocol) and cookies (Cookie). +// Use the response.Header to get the selected subprotocol +// (Sec-WebSocket-Protocol) and cookies (Set-Cookie). +// +// If the WebSocket handshake fails, ErrBadHandshake is returned along with a +// non-nil *http.Response so that callers can handle redirects, authentication, +// etcetera. The response body may not contain the entire response and does not +// need to be closed by the application. +func (d *Dialer) Dial(urlStr string, requestHeader http.Header) (*Conn, *http.Response, error) { + + if d == nil { + d = &Dialer{ + Proxy: http.ProxyFromEnvironment, + } + } + + challengeKey, err := generateChallengeKey() + if err != nil { + return nil, nil, err + } + + u, err := parseURL(urlStr) + if err != nil { + return nil, nil, err + } + + switch u.Scheme { + case "ws": + u.Scheme = "http" + case "wss": + u.Scheme = "https" + default: + return nil, nil, errMalformedURL + } + + if u.User != nil { + // User name and password are not allowed in websocket URIs. + return nil, nil, errMalformedURL + } + + req := &http.Request{ + Method: "GET", + URL: u, + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Header: make(http.Header), + Host: u.Host, + } + + // Set the cookies present in the cookie jar of the dialer + if d.Jar != nil { + for _, cookie := range d.Jar.Cookies(u) { + req.AddCookie(cookie) + } + } + + // Set the request headers using the capitalization for names and values in + // RFC examples. Although the capitalization shouldn't matter, there are + // servers that depend on it. The Header.Set method is not used because the + // method canonicalizes the header names. 
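+	// (For example, Header.Set would canonicalize "Sec-WebSocket-Key" to
+	// "Sec-Websocket-Key", so the map entries below are assigned directly.)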
+	req.Header["Upgrade"] = []string{"websocket"}
+	req.Header["Connection"] = []string{"Upgrade"}
+	req.Header["Sec-WebSocket-Key"] = []string{challengeKey}
+	req.Header["Sec-WebSocket-Version"] = []string{"13"}
+	if len(d.Subprotocols) > 0 {
+		req.Header["Sec-WebSocket-Protocol"] = []string{strings.Join(d.Subprotocols, ", ")}
+	}
+	for k, vs := range requestHeader {
+		switch {
+		case k == "Host":
+			if len(vs) > 0 {
+				req.Host = vs[0]
+			}
+		case k == "Upgrade" ||
+			k == "Connection" ||
+			k == "Sec-Websocket-Key" ||
+			k == "Sec-Websocket-Version" ||
+			k == "Sec-Websocket-Extensions" ||
+			(k == "Sec-Websocket-Protocol" && len(d.Subprotocols) > 0):
+			return nil, nil, errors.New("websocket: duplicate header not allowed: " + k)
+		default:
+			req.Header[k] = vs
+		}
+	}
+
+	if d.EnableCompression {
+		req.Header.Set("Sec-Websocket-Extensions", "permessage-deflate; server_no_context_takeover; client_no_context_takeover")
+	}
+
+	hostPort, hostNoPort := hostPortNoPort(u)
+
+	var proxyURL *url.URL
+	// Check whether the proxy method has been configured.
+	if d.Proxy != nil {
+		proxyURL, err = d.Proxy(req)
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+
+	var targetHostPort string
+	if proxyURL != nil {
+		targetHostPort, _ = hostPortNoPort(proxyURL)
+	} else {
+		targetHostPort = hostPort
+	}
+
+	var deadline time.Time
+	if d.HandshakeTimeout != 0 {
+		deadline = time.Now().Add(d.HandshakeTimeout)
+	}
+
+	netDial := d.NetDial
+	if netDial == nil {
+		netDialer := &net.Dialer{Deadline: deadline}
+		netDial = netDialer.Dial
+	}
+
+	netConn, err := netDial("tcp", targetHostPort)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	defer func() {
+		if netConn != nil {
+			netConn.Close()
+		}
+	}()
+
+	if err := netConn.SetDeadline(deadline); err != nil {
+		return nil, nil, err
+	}
+
+	if proxyURL != nil {
+		connectHeader := make(http.Header)
+		if user := proxyURL.User; user != nil {
+			proxyUser := user.Username()
+			if proxyPassword, passwordSet := user.Password(); passwordSet {
+				credential := base64.StdEncoding.EncodeToString([]byte(proxyUser + ":" + proxyPassword))
+				connectHeader.Set("Proxy-Authorization", "Basic "+credential)
+			}
+		}
+		connectReq := &http.Request{
+			Method: "CONNECT",
+			URL:    &url.URL{Opaque: hostPort},
+			Host:   hostPort,
+			Header: connectHeader,
+		}
+
+		connectReq.Write(netConn)
+
+		// Read response.
+		// Okay to use and discard buffered reader here, because
+		// TLS server will not speak until spoken to.
+ br := bufio.NewReader(netConn) + resp, err := http.ReadResponse(br, connectReq) + if err != nil { + return nil, nil, err + } + if resp.StatusCode != 200 { + f := strings.SplitN(resp.Status, " ", 2) + return nil, nil, errors.New(f[1]) + } + } + + if u.Scheme == "https" { + cfg := cloneTLSConfig(d.TLSClientConfig) + if cfg.ServerName == "" { + cfg.ServerName = hostNoPort + } + tlsConn := tls.Client(netConn, cfg) + netConn = tlsConn + if err := tlsConn.Handshake(); err != nil { + return nil, nil, err + } + if !cfg.InsecureSkipVerify { + if err := tlsConn.VerifyHostname(cfg.ServerName); err != nil { + return nil, nil, err + } + } + } + + conn := newConn(netConn, false, d.ReadBufferSize, d.WriteBufferSize) + + if err := req.Write(netConn); err != nil { + return nil, nil, err + } + + resp, err := http.ReadResponse(conn.br, req) + if err != nil { + return nil, nil, err + } + + if d.Jar != nil { + if rc := resp.Cookies(); len(rc) > 0 { + d.Jar.SetCookies(u, rc) + } + } + + if resp.StatusCode != 101 || + !strings.EqualFold(resp.Header.Get("Upgrade"), "websocket") || + !strings.EqualFold(resp.Header.Get("Connection"), "upgrade") || + resp.Header.Get("Sec-Websocket-Accept") != computeAcceptKey(challengeKey) { + // Before closing the network connection on return from this + // function, slurp up some of the response to aid application + // debugging. + buf := make([]byte, 1024) + n, _ := io.ReadFull(resp.Body, buf) + resp.Body = ioutil.NopCloser(bytes.NewReader(buf[:n])) + return nil, resp, ErrBadHandshake + } + + for _, ext := range parseExtensions(resp.Header) { + if ext[""] != "permessage-deflate" { + continue + } + _, snct := ext["server_no_context_takeover"] + _, cnct := ext["client_no_context_takeover"] + if !snct || !cnct { + return nil, resp, errInvalidCompression + } + conn.newCompressionWriter = compressNoContextTakeover + conn.newDecompressionReader = decompressNoContextTakeover + break + } + + resp.Body = ioutil.NopCloser(bytes.NewReader([]byte{})) + conn.subprotocol = resp.Header.Get("Sec-Websocket-Protocol") + + netConn.SetDeadline(time.Time{}) + netConn = nil // to avoid close in defer. + return conn, resp, nil +} diff --git a/vendor/github.com/gorilla/websocket/client_clone.go b/vendor/github.com/gorilla/websocket/client_clone.go new file mode 100644 index 0000000..4f0d943 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.8 + +package websocket + +import "crypto/tls" + +func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return cfg.Clone() +} diff --git a/vendor/github.com/gorilla/websocket/client_clone_legacy.go b/vendor/github.com/gorilla/websocket/client_clone_legacy.go new file mode 100644 index 0000000..babb007 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/client_clone_legacy.go @@ -0,0 +1,38 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8 + +package websocket + +import "crypto/tls" + +// cloneTLSConfig clones all public fields except the fields +// SessionTicketsDisabled and SessionTicketKey. This avoids copying the +// sync.Mutex in the sync.Once and makes it safe to call cloneTLSConfig on a +// config in active use. 
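+//
+// (On Go 1.8 and newer, the build-tagged variant in client_clone.go uses
+// tls.Config.Clone for this instead.)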
+func cloneTLSConfig(cfg *tls.Config) *tls.Config { + if cfg == nil { + return &tls.Config{} + } + return &tls.Config{ + Rand: cfg.Rand, + Time: cfg.Time, + Certificates: cfg.Certificates, + NameToCertificate: cfg.NameToCertificate, + GetCertificate: cfg.GetCertificate, + RootCAs: cfg.RootCAs, + NextProtos: cfg.NextProtos, + ServerName: cfg.ServerName, + ClientAuth: cfg.ClientAuth, + ClientCAs: cfg.ClientCAs, + InsecureSkipVerify: cfg.InsecureSkipVerify, + CipherSuites: cfg.CipherSuites, + PreferServerCipherSuites: cfg.PreferServerCipherSuites, + ClientSessionCache: cfg.ClientSessionCache, + MinVersion: cfg.MinVersion, + MaxVersion: cfg.MaxVersion, + CurvePreferences: cfg.CurvePreferences, + } +} diff --git a/vendor/github.com/gorilla/websocket/compression.go b/vendor/github.com/gorilla/websocket/compression.go new file mode 100644 index 0000000..813ffb1 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/compression.go @@ -0,0 +1,148 @@ +// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "compress/flate" + "errors" + "io" + "strings" + "sync" +) + +const ( + minCompressionLevel = -2 // flate.HuffmanOnly not defined in Go < 1.6 + maxCompressionLevel = flate.BestCompression + defaultCompressionLevel = 1 +) + +var ( + flateWriterPools [maxCompressionLevel - minCompressionLevel + 1]sync.Pool + flateReaderPool = sync.Pool{New: func() interface{} { + return flate.NewReader(nil) + }} +) + +func decompressNoContextTakeover(r io.Reader) io.ReadCloser { + const tail = + // Add four bytes as specified in RFC + "\x00\x00\xff\xff" + + // Add final block to squelch unexpected EOF error from flate reader. + "\x01\x00\x00\xff\xff" + + fr, _ := flateReaderPool.Get().(io.ReadCloser) + fr.(flate.Resetter).Reset(io.MultiReader(r, strings.NewReader(tail)), nil) + return &flateReadWrapper{fr} +} + +func isValidCompressionLevel(level int) bool { + return minCompressionLevel <= level && level <= maxCompressionLevel +} + +func compressNoContextTakeover(w io.WriteCloser, level int) io.WriteCloser { + p := &flateWriterPools[level-minCompressionLevel] + tw := &truncWriter{w: w} + fw, _ := p.Get().(*flate.Writer) + if fw == nil { + fw, _ = flate.NewWriter(tw, level) + } else { + fw.Reset(tw) + } + return &flateWriteWrapper{fw: fw, tw: tw, p: p} +} + +// truncWriter is an io.Writer that writes all but the last four bytes of the +// stream to another io.Writer. +type truncWriter struct { + w io.WriteCloser + n int + p [4]byte +} + +func (w *truncWriter) Write(p []byte) (int, error) { + n := 0 + + // fill buffer first for simplicity. 
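+	// (The four bytes held back here end up being the trailing
+	// 0x00 0x00 0xff 0xff flate sync marker, which flateWriteWrapper.Close
+	// verifies and never forwards to the underlying writer.)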
+ if w.n < len(w.p) { + n = copy(w.p[w.n:], p) + p = p[n:] + w.n += n + if len(p) == 0 { + return n, nil + } + } + + m := len(p) + if m > len(w.p) { + m = len(w.p) + } + + if nn, err := w.w.Write(w.p[:m]); err != nil { + return n + nn, err + } + + copy(w.p[:], w.p[m:]) + copy(w.p[len(w.p)-m:], p[len(p)-m:]) + nn, err := w.w.Write(p[:len(p)-m]) + return n + nn, err +} + +type flateWriteWrapper struct { + fw *flate.Writer + tw *truncWriter + p *sync.Pool +} + +func (w *flateWriteWrapper) Write(p []byte) (int, error) { + if w.fw == nil { + return 0, errWriteClosed + } + return w.fw.Write(p) +} + +func (w *flateWriteWrapper) Close() error { + if w.fw == nil { + return errWriteClosed + } + err1 := w.fw.Flush() + w.p.Put(w.fw) + w.fw = nil + if w.tw.p != [4]byte{0, 0, 0xff, 0xff} { + return errors.New("websocket: internal error, unexpected bytes at end of flate stream") + } + err2 := w.tw.w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +type flateReadWrapper struct { + fr io.ReadCloser +} + +func (r *flateReadWrapper) Read(p []byte) (int, error) { + if r.fr == nil { + return 0, io.ErrClosedPipe + } + n, err := r.fr.Read(p) + if err == io.EOF { + // Preemptively place the reader back in the pool. This helps with + // scenarios where the application does not call NextReader() soon after + // this final read. + r.Close() + } + return n, err +} + +func (r *flateReadWrapper) Close() error { + if r.fr == nil { + return io.ErrClosedPipe + } + err := r.fr.Close() + flateReaderPool.Put(r.fr) + r.fr = nil + return err +} diff --git a/vendor/github.com/gorilla/websocket/conn.go b/vendor/github.com/gorilla/websocket/conn.go new file mode 100644 index 0000000..97e1dba --- /dev/null +++ b/vendor/github.com/gorilla/websocket/conn.go @@ -0,0 +1,1149 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "bufio" + "encoding/binary" + "errors" + "io" + "io/ioutil" + "math/rand" + "net" + "strconv" + "sync" + "time" + "unicode/utf8" +) + +const ( + // Frame header byte 0 bits from Section 5.2 of RFC 6455 + finalBit = 1 << 7 + rsv1Bit = 1 << 6 + rsv2Bit = 1 << 5 + rsv3Bit = 1 << 4 + + // Frame header byte 1 bits from Section 5.2 of RFC 6455 + maskBit = 1 << 7 + + maxFrameHeaderSize = 2 + 8 + 4 // Fixed header + length + mask + maxControlFramePayloadSize = 125 + + writeWait = time.Second + + defaultReadBufferSize = 4096 + defaultWriteBufferSize = 4096 + + continuationFrame = 0 + noFrame = -1 +) + +// Close codes defined in RFC 6455, section 11.7. +const ( + CloseNormalClosure = 1000 + CloseGoingAway = 1001 + CloseProtocolError = 1002 + CloseUnsupportedData = 1003 + CloseNoStatusReceived = 1005 + CloseAbnormalClosure = 1006 + CloseInvalidFramePayloadData = 1007 + ClosePolicyViolation = 1008 + CloseMessageTooBig = 1009 + CloseMandatoryExtension = 1010 + CloseInternalServerErr = 1011 + CloseServiceRestart = 1012 + CloseTryAgainLater = 1013 + CloseTLSHandshake = 1015 +) + +// The message types are defined in RFC 6455, section 11.8. +const ( + // TextMessage denotes a text data message. The text message payload is + // interpreted as UTF-8 encoded text data. + TextMessage = 1 + + // BinaryMessage denotes a binary data message. + BinaryMessage = 2 + + // CloseMessage denotes a close control message. The optional message + // payload contains a numeric code and text. Use the FormatCloseMessage + // function to format a close message payload. 
+	CloseMessage = 8
+
+	// PingMessage denotes a ping control message. The optional message payload
+	// is UTF-8 encoded text.
+	PingMessage = 9
+
+	// PongMessage denotes a pong control message. The optional message payload
+	// is UTF-8 encoded text.
+	PongMessage = 10
+)
+
+// ErrCloseSent is returned when the application writes a message to the
+// connection after sending a close message.
+var ErrCloseSent = errors.New("websocket: close sent")
+
+// ErrReadLimit is returned when reading a message that is larger than the
+// read limit set for the connection.
+var ErrReadLimit = errors.New("websocket: read limit exceeded")
+
+// netError satisfies the net Error interface.
+type netError struct {
+	msg       string
+	temporary bool
+	timeout   bool
+}
+
+func (e *netError) Error() string   { return e.msg }
+func (e *netError) Temporary() bool { return e.temporary }
+func (e *netError) Timeout() bool   { return e.timeout }
+
+// CloseError represents a close frame.
+type CloseError struct {
+
+	// Code is defined in RFC 6455, section 11.7.
+	Code int
+
+	// Text is the optional text payload.
+	Text string
+}
+
+func (e *CloseError) Error() string {
+	s := []byte("websocket: close ")
+	s = strconv.AppendInt(s, int64(e.Code), 10)
+	switch e.Code {
+	case CloseNormalClosure:
+		s = append(s, " (normal)"...)
+	case CloseGoingAway:
+		s = append(s, " (going away)"...)
+	case CloseProtocolError:
+		s = append(s, " (protocol error)"...)
+	case CloseUnsupportedData:
+		s = append(s, " (unsupported data)"...)
+	case CloseNoStatusReceived:
+		s = append(s, " (no status)"...)
+	case CloseAbnormalClosure:
+		s = append(s, " (abnormal closure)"...)
+	case CloseInvalidFramePayloadData:
+		s = append(s, " (invalid payload data)"...)
+	case ClosePolicyViolation:
+		s = append(s, " (policy violation)"...)
+	case CloseMessageTooBig:
+		s = append(s, " (message too big)"...)
+	case CloseMandatoryExtension:
+		s = append(s, " (mandatory extension missing)"...)
+	case CloseInternalServerErr:
+		s = append(s, " (internal server error)"...)
+	case CloseTLSHandshake:
+		s = append(s, " (TLS handshake error)"...)
+	}
+	if e.Text != "" {
+		s = append(s, ": "...)
+		s = append(s, e.Text...)
+	}
+	return string(s)
+}
+
+// IsCloseError returns a boolean indicating whether the error is a *CloseError
+// with one of the specified codes.
+func IsCloseError(err error, codes ...int) bool {
+	if e, ok := err.(*CloseError); ok {
+		for _, code := range codes {
+			if e.Code == code {
+				return true
+			}
+		}
+	}
+	return false
+}
+
+// IsUnexpectedCloseError returns a boolean indicating whether the error is a
+// *CloseError with a code not in the list of expected codes.
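+//
+// A typical read-loop check might look like this (illustrative sketch;
+// Conn.ReadMessage is defined elsewhere in this package, and the log call is
+// an assumption about the surrounding program):
+//
+//	_, _, err := c.ReadMessage()
+//	if IsUnexpectedCloseError(err, CloseNormalClosure, CloseGoingAway) {
+//		log.Printf("unexpected close: %v", err)
+//	}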
+func IsUnexpectedCloseError(err error, expectedCodes ...int) bool {
+	if e, ok := err.(*CloseError); ok {
+		for _, code := range expectedCodes {
+			if e.Code == code {
+				return false
+			}
+		}
+		return true
+	}
+	return false
+}
+
+var (
+	errWriteTimeout        = &netError{msg: "websocket: write timeout", timeout: true, temporary: true}
+	errUnexpectedEOF       = &CloseError{Code: CloseAbnormalClosure, Text: io.ErrUnexpectedEOF.Error()}
+	errBadWriteOpCode      = errors.New("websocket: bad write message type")
+	errWriteClosed         = errors.New("websocket: write closed")
+	errInvalidControlFrame = errors.New("websocket: invalid control frame")
+)
+
+func newMaskKey() [4]byte {
+	n := rand.Uint32()
+	return [4]byte{byte(n), byte(n >> 8), byte(n >> 16), byte(n >> 24)}
+}
+
+func hideTempErr(err error) error {
+	if e, ok := err.(net.Error); ok && e.Temporary() {
+		err = &netError{msg: e.Error(), timeout: e.Timeout()}
+	}
+	return err
+}
+
+func isControl(frameType int) bool {
+	return frameType == CloseMessage || frameType == PingMessage || frameType == PongMessage
+}
+
+func isData(frameType int) bool {
+	return frameType == TextMessage || frameType == BinaryMessage
+}
+
+var validReceivedCloseCodes = map[int]bool{
+	// see http://www.iana.org/assignments/websocket/websocket.xhtml#close-code-number
+
+	CloseNormalClosure:           true,
+	CloseGoingAway:               true,
+	CloseProtocolError:           true,
+	CloseUnsupportedData:         true,
+	CloseNoStatusReceived:        false,
+	CloseAbnormalClosure:         false,
+	CloseInvalidFramePayloadData: true,
+	ClosePolicyViolation:         true,
+	CloseMessageTooBig:           true,
+	CloseMandatoryExtension:      true,
+	CloseInternalServerErr:       true,
+	CloseServiceRestart:          true,
+	CloseTryAgainLater:           true,
+	CloseTLSHandshake:            false,
+}
+
+func isValidReceivedCloseCode(code int) bool {
+	return validReceivedCloseCodes[code] || (code >= 3000 && code <= 4999)
+}
+
+// The Conn type represents a WebSocket connection.
+type Conn struct {
+	conn        net.Conn
+	isServer    bool
+	subprotocol string
+
+	// Write fields
+	mu            chan bool // used as mutex to protect write to conn
+	writeBuf      []byte    // frame is constructed in this buffer.
+	writeDeadline time.Time
+	writer        io.WriteCloser // the current writer returned to the application
+	isWriting     bool           // for best-effort concurrent write detection
+
+	writeErrMu sync.Mutex
+	writeErr   error
+
+	enableWriteCompression bool
+	compressionLevel       int
+	newCompressionWriter   func(io.WriteCloser, int) io.WriteCloser
+
+	// Read fields
+	reader        io.ReadCloser // the current reader returned to the application
+	readErr       error
+	br            *bufio.Reader
+	readRemaining int64 // bytes remaining in current frame.
+	readFinal     bool  // true once the final frame of the current message has been read.
+	readLength    int64 // Message size.
+	readLimit     int64 // Maximum message size.
+ readMaskPos int + readMaskKey [4]byte + handlePong func(string) error + handlePing func(string) error + handleClose func(int, string) error + readErrCount int + messageReader *messageReader // the current low-level reader + + readDecompress bool // whether last read frame had RSV1 set + newDecompressionReader func(io.Reader) io.ReadCloser +} + +func newConn(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int) *Conn { + return newConnBRW(conn, isServer, readBufferSize, writeBufferSize, nil) +} + +type writeHook struct { + p []byte +} + +func (wh *writeHook) Write(p []byte) (int, error) { + wh.p = p + return len(p), nil +} + +func newConnBRW(conn net.Conn, isServer bool, readBufferSize, writeBufferSize int, brw *bufio.ReadWriter) *Conn { + mu := make(chan bool, 1) + mu <- true + + var br *bufio.Reader + if readBufferSize == 0 && brw != nil && brw.Reader != nil { + // Reuse the supplied bufio.Reader if the buffer has a useful size. + // This code assumes that peek on a reader returns + // bufio.Reader.buf[:0]. + brw.Reader.Reset(conn) + if p, err := brw.Reader.Peek(0); err == nil && cap(p) >= 256 { + br = brw.Reader + } + } + if br == nil { + if readBufferSize == 0 { + readBufferSize = defaultReadBufferSize + } + if readBufferSize < maxControlFramePayloadSize { + readBufferSize = maxControlFramePayloadSize + } + br = bufio.NewReaderSize(conn, readBufferSize) + } + + var writeBuf []byte + if writeBufferSize == 0 && brw != nil && brw.Writer != nil { + // Use the bufio.Writer's buffer if the buffer has a useful size. This + // code assumes that bufio.Writer.buf[:1] is passed to the + // bufio.Writer's underlying writer. + var wh writeHook + brw.Writer.Reset(&wh) + brw.Writer.WriteByte(0) + brw.Flush() + if cap(wh.p) >= maxFrameHeaderSize+256 { + writeBuf = wh.p[:cap(wh.p)] + } + } + + if writeBuf == nil { + if writeBufferSize == 0 { + writeBufferSize = defaultWriteBufferSize + } + writeBuf = make([]byte, writeBufferSize+maxFrameHeaderSize) + } + + c := &Conn{ + isServer: isServer, + br: br, + conn: conn, + mu: mu, + readFinal: true, + writeBuf: writeBuf, + enableWriteCompression: true, + compressionLevel: defaultCompressionLevel, + } + c.SetCloseHandler(nil) + c.SetPingHandler(nil) + c.SetPongHandler(nil) + return c +} + +// Subprotocol returns the negotiated protocol for the connection. +func (c *Conn) Subprotocol() string { + return c.subprotocol +} + +// Close closes the underlying network connection without sending or waiting for a close frame. +func (c *Conn) Close() error { + return c.conn.Close() +} + +// LocalAddr returns the local network address. +func (c *Conn) LocalAddr() net.Addr { + return c.conn.LocalAddr() +} + +// RemoteAddr returns the remote network address. 
+func (c *Conn) RemoteAddr() net.Addr { + return c.conn.RemoteAddr() +} + +// Write methods + +func (c *Conn) writeFatal(err error) error { + err = hideTempErr(err) + c.writeErrMu.Lock() + if c.writeErr == nil { + c.writeErr = err + } + c.writeErrMu.Unlock() + return err +} + +func (c *Conn) write(frameType int, deadline time.Time, bufs ...[]byte) error { + <-c.mu + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + for _, buf := range bufs { + if len(buf) > 0 { + _, err := c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + } + } + + if frameType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return nil +} + +// WriteControl writes a control message with the given deadline. The allowed +// message types are CloseMessage, PingMessage and PongMessage. +func (c *Conn) WriteControl(messageType int, data []byte, deadline time.Time) error { + if !isControl(messageType) { + return errBadWriteOpCode + } + if len(data) > maxControlFramePayloadSize { + return errInvalidControlFrame + } + + b0 := byte(messageType) | finalBit + b1 := byte(len(data)) + if !c.isServer { + b1 |= maskBit + } + + buf := make([]byte, 0, maxFrameHeaderSize+maxControlFramePayloadSize) + buf = append(buf, b0, b1) + + if c.isServer { + buf = append(buf, data...) + } else { + key := newMaskKey() + buf = append(buf, key[:]...) + buf = append(buf, data...) + maskBytes(key, 0, buf[6:]) + } + + d := time.Hour * 1000 + if !deadline.IsZero() { + d = deadline.Sub(time.Now()) + if d < 0 { + return errWriteTimeout + } + } + + timer := time.NewTimer(d) + select { + case <-c.mu: + timer.Stop() + case <-timer.C: + return errWriteTimeout + } + defer func() { c.mu <- true }() + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + if err != nil { + return err + } + + c.conn.SetWriteDeadline(deadline) + _, err = c.conn.Write(buf) + if err != nil { + return c.writeFatal(err) + } + if messageType == CloseMessage { + c.writeFatal(ErrCloseSent) + } + return err +} + +func (c *Conn) prepWrite(messageType int) error { + // Close previous writer if not already closed by the application. It's + // probably better to return an error in this situation, but we cannot + // change this without breaking existing applications. + if c.writer != nil { + c.writer.Close() + c.writer = nil + } + + if !isControl(messageType) && !isData(messageType) { + return errBadWriteOpCode + } + + c.writeErrMu.Lock() + err := c.writeErr + c.writeErrMu.Unlock() + return err +} + +// NextWriter returns a writer for the next message to send. The writer's Close +// method flushes the complete message to the network. +// +// There can be at most one open writer on a connection. NextWriter closes the +// previous writer if the application has not already done so. +func (c *Conn) NextWriter(messageType int) (io.WriteCloser, error) { + if err := c.prepWrite(messageType); err != nil { + return nil, err + } + + mw := &messageWriter{ + c: c, + frameType: messageType, + pos: maxFrameHeaderSize, + } + c.writer = mw + if c.newCompressionWriter != nil && c.enableWriteCompression && isData(messageType) { + w := c.newCompressionWriter(c.writer, c.compressionLevel) + mw.compress = true + c.writer = w + } + return c.writer, nil +} + +type messageWriter struct { + c *Conn + compress bool // whether next call to flushFrame should set RSV1 + pos int // end of data in writeBuf. + frameType int // type of the current frame. 
+	err       error
+}
+
+func (w *messageWriter) fatal(err error) error {
+	if w.err == nil {
+		w.err = err
+		w.c.writer = nil
+	}
+	return err
+}
+
+// flushFrame writes buffered data and extra as a frame to the network. The
+// final argument indicates that this is the last frame in the message.
+func (w *messageWriter) flushFrame(final bool, extra []byte) error {
+	c := w.c
+	length := w.pos - maxFrameHeaderSize + len(extra)
+
+	// Check for invalid control frames.
+	if isControl(w.frameType) &&
+		(!final || length > maxControlFramePayloadSize) {
+		return w.fatal(errInvalidControlFrame)
+	}
+
+	b0 := byte(w.frameType)
+	if final {
+		b0 |= finalBit
+	}
+	if w.compress {
+		b0 |= rsv1Bit
+	}
+	w.compress = false
+
+	b1 := byte(0)
+	if !c.isServer {
+		b1 |= maskBit
+	}
+
+	// Assume that the frame starts at beginning of c.writeBuf.
+	framePos := 0
+	if c.isServer {
+		// Adjust up if mask not included in the header.
+		framePos = 4
+	}
+
+	switch {
+	case length >= 65536:
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | 127
+		binary.BigEndian.PutUint64(c.writeBuf[framePos+2:], uint64(length))
+	case length > 125:
+		framePos += 6
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | 126
+		binary.BigEndian.PutUint16(c.writeBuf[framePos+2:], uint16(length))
+	default:
+		framePos += 8
+		c.writeBuf[framePos] = b0
+		c.writeBuf[framePos+1] = b1 | byte(length)
+	}
+
+	if !c.isServer {
+		key := newMaskKey()
+		copy(c.writeBuf[maxFrameHeaderSize-4:], key[:])
+		maskBytes(key, 0, c.writeBuf[maxFrameHeaderSize:w.pos])
+		if len(extra) > 0 {
+			return c.writeFatal(errors.New("websocket: internal error, extra used in client mode"))
+		}
+	}
+
+	// Write the buffers to the connection with best-effort detection of
+	// concurrent writes. See the concurrency section in the package
+	// documentation for more info.
+
+	if c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = true
+
+	err := c.write(w.frameType, c.writeDeadline, c.writeBuf[framePos:w.pos], extra)
+
+	if !c.isWriting {
+		panic("concurrent write to websocket connection")
+	}
+	c.isWriting = false
+
+	if err != nil {
+		return w.fatal(err)
+	}
+
+	if final {
+		c.writer = nil
+		return nil
+	}
+
+	// Setup for next frame.
+	w.pos = maxFrameHeaderSize
+	w.frameType = continuationFrame
+	return nil
+}
+
+func (w *messageWriter) ncopy(max int) (int, error) {
+	n := len(w.c.writeBuf) - w.pos
+	if n <= 0 {
+		if err := w.flushFrame(false, nil); err != nil {
+			return 0, err
+		}
+		n = len(w.c.writeBuf) - w.pos
+	}
+	if n > max {
+		n = max
+	}
+	return n, nil
+}
+
+func (w *messageWriter) Write(p []byte) (int, error) {
+	if w.err != nil {
+		return 0, w.err
+	}
+
+	if len(p) > 2*len(w.c.writeBuf) && w.c.isServer {
+		// Don't buffer large messages.
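+		// Writing the oversized payload through flushFrame's extra
+		// argument sends it as its own frame without copying it into
+		// writeBuf first.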
+ err := w.flushFrame(false, p) + if err != nil { + return 0, err + } + return len(p), nil + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) WriteString(p string) (int, error) { + if w.err != nil { + return 0, w.err + } + + nn := len(p) + for len(p) > 0 { + n, err := w.ncopy(len(p)) + if err != nil { + return 0, err + } + copy(w.c.writeBuf[w.pos:], p[:n]) + w.pos += n + p = p[n:] + } + return nn, nil +} + +func (w *messageWriter) ReadFrom(r io.Reader) (nn int64, err error) { + if w.err != nil { + return 0, w.err + } + for { + if w.pos == len(w.c.writeBuf) { + err = w.flushFrame(false, nil) + if err != nil { + break + } + } + var n int + n, err = r.Read(w.c.writeBuf[w.pos:]) + w.pos += n + nn += int64(n) + if err != nil { + if err == io.EOF { + err = nil + } + break + } + } + return nn, err +} + +func (w *messageWriter) Close() error { + if w.err != nil { + return w.err + } + if err := w.flushFrame(true, nil); err != nil { + return err + } + w.err = errWriteClosed + return nil +} + +// WritePreparedMessage writes prepared message into connection. +func (c *Conn) WritePreparedMessage(pm *PreparedMessage) error { + frameType, frameData, err := pm.frame(prepareKey{ + isServer: c.isServer, + compress: c.newCompressionWriter != nil && c.enableWriteCompression && isData(pm.messageType), + compressionLevel: c.compressionLevel, + }) + if err != nil { + return err + } + if c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = true + err = c.write(frameType, c.writeDeadline, frameData, nil) + if !c.isWriting { + panic("concurrent write to websocket connection") + } + c.isWriting = false + return err +} + +// WriteMessage is a helper method for getting a writer using NextWriter, +// writing the message and closing the writer. +func (c *Conn) WriteMessage(messageType int, data []byte) error { + + if c.isServer && (c.newCompressionWriter == nil || !c.enableWriteCompression) { + // Fast path with no allocations and single frame. + + if err := c.prepWrite(messageType); err != nil { + return err + } + mw := messageWriter{c: c, frameType: messageType, pos: maxFrameHeaderSize} + n := copy(c.writeBuf[mw.pos:], data) + mw.pos += n + data = data[n:] + return mw.flushFrame(true, data) + } + + w, err := c.NextWriter(messageType) + if err != nil { + return err + } + if _, err = w.Write(data); err != nil { + return err + } + return w.Close() +} + +// SetWriteDeadline sets the write deadline on the underlying network +// connection. After a write has timed out, the websocket state is corrupt and +// all future writes will return an error. A zero value for t means writes will +// not time out. +func (c *Conn) SetWriteDeadline(t time.Time) error { + c.writeDeadline = t + return nil +} + +// Read methods + +func (c *Conn) advanceFrame() (int, error) { + + // 1. Skip remainder of previous frame. + + if c.readRemaining > 0 { + if _, err := io.CopyN(ioutil.Discard, c.br, c.readRemaining); err != nil { + return noFrame, err + } + } + + // 2. Read and parse first two bytes of frame header. 
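+	//
+	// Per RFC 6455 section 5.2, byte 0 carries FIN and RSV1-RSV3 in the
+	// high bits and the opcode in the low four bits; byte 1 carries the
+	// MASK flag in the high bit and a 7-bit payload length, where 126 and
+	// 127 signal 16- and 64-bit extended lengths in the bytes that follow.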
+ + p, err := c.read(2) + if err != nil { + return noFrame, err + } + + final := p[0]&finalBit != 0 + frameType := int(p[0] & 0xf) + mask := p[1]&maskBit != 0 + c.readRemaining = int64(p[1] & 0x7f) + + c.readDecompress = false + if c.newDecompressionReader != nil && (p[0]&rsv1Bit) != 0 { + c.readDecompress = true + p[0] &^= rsv1Bit + } + + if rsv := p[0] & (rsv1Bit | rsv2Bit | rsv3Bit); rsv != 0 { + return noFrame, c.handleProtocolError("unexpected reserved bits 0x" + strconv.FormatInt(int64(rsv), 16)) + } + + switch frameType { + case CloseMessage, PingMessage, PongMessage: + if c.readRemaining > maxControlFramePayloadSize { + return noFrame, c.handleProtocolError("control frame length > 125") + } + if !final { + return noFrame, c.handleProtocolError("control frame not final") + } + case TextMessage, BinaryMessage: + if !c.readFinal { + return noFrame, c.handleProtocolError("message start before final message frame") + } + c.readFinal = final + case continuationFrame: + if c.readFinal { + return noFrame, c.handleProtocolError("continuation after final message frame") + } + c.readFinal = final + default: + return noFrame, c.handleProtocolError("unknown opcode " + strconv.Itoa(frameType)) + } + + // 3. Read and parse frame length. + + switch c.readRemaining { + case 126: + p, err := c.read(2) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint16(p)) + case 127: + p, err := c.read(8) + if err != nil { + return noFrame, err + } + c.readRemaining = int64(binary.BigEndian.Uint64(p)) + } + + // 4. Handle frame masking. + + if mask != c.isServer { + return noFrame, c.handleProtocolError("incorrect mask flag") + } + + if mask { + c.readMaskPos = 0 + p, err := c.read(len(c.readMaskKey)) + if err != nil { + return noFrame, err + } + copy(c.readMaskKey[:], p) + } + + // 5. For text and binary messages, enforce read limit and return. + + if frameType == continuationFrame || frameType == TextMessage || frameType == BinaryMessage { + + c.readLength += c.readRemaining + if c.readLimit > 0 && c.readLength > c.readLimit { + c.WriteControl(CloseMessage, FormatCloseMessage(CloseMessageTooBig, ""), time.Now().Add(writeWait)) + return noFrame, ErrReadLimit + } + + return frameType, nil + } + + // 6. Read control frame payload. + + var payload []byte + if c.readRemaining > 0 { + payload, err = c.read(int(c.readRemaining)) + c.readRemaining = 0 + if err != nil { + return noFrame, err + } + if c.isServer { + maskBytes(c.readMaskKey, 0, payload) + } + } + + // 7. Process control frame payload. 
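+	//
+	// A close payload, when present, is a two-byte big-endian status code
+	// optionally followed by a UTF-8 encoded reason (RFC 6455 section 5.5.1).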
+
+	switch frameType {
+	case PongMessage:
+		if err := c.handlePong(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case PingMessage:
+		if err := c.handlePing(string(payload)); err != nil {
+			return noFrame, err
+		}
+	case CloseMessage:
+		closeCode := CloseNoStatusReceived
+		closeText := ""
+		if len(payload) >= 2 {
+			closeCode = int(binary.BigEndian.Uint16(payload))
+			if !isValidReceivedCloseCode(closeCode) {
+				return noFrame, c.handleProtocolError("invalid close code")
+			}
+			closeText = string(payload[2:])
+			if !utf8.ValidString(closeText) {
+				return noFrame, c.handleProtocolError("invalid utf8 payload in close frame")
+			}
+		}
+		if err := c.handleClose(closeCode, closeText); err != nil {
+			return noFrame, err
+		}
+		return noFrame, &CloseError{Code: closeCode, Text: closeText}
+	}
+
+	return frameType, nil
+}
+
+func (c *Conn) handleProtocolError(message string) error {
+	c.WriteControl(CloseMessage, FormatCloseMessage(CloseProtocolError, message), time.Now().Add(writeWait))
+	return errors.New("websocket: " + message)
+}
+
+// NextReader returns the next data message received from the peer. The
+// returned messageType is either TextMessage or BinaryMessage.
+//
+// There can be at most one open reader on a connection. NextReader discards
+// the previous message if the application has not already consumed it.
+//
+// Applications must break out of the application's read loop when this method
+// returns a non-nil error value. Errors returned from this method are
+// permanent. Once this method returns a non-nil error, all subsequent calls to
+// this method return the same error.
+func (c *Conn) NextReader() (messageType int, r io.Reader, err error) {
+	// Close previous reader, only relevant for decompression.
+	if c.reader != nil {
+		c.reader.Close()
+		c.reader = nil
+	}
+
+	c.messageReader = nil
+	c.readLength = 0
+
+	for c.readErr == nil {
+		frameType, err := c.advanceFrame()
+		if err != nil {
+			c.readErr = hideTempErr(err)
+			break
+		}
+		if frameType == TextMessage || frameType == BinaryMessage {
+			c.messageReader = &messageReader{c}
+			c.reader = c.messageReader
+			if c.readDecompress {
+				c.reader = c.newDecompressionReader(c.reader)
+			}
+			return frameType, c.reader, nil
+		}
+	}
+
+	// Applications that do not handle the error returned from this method spin
+	// in a tight loop on connection failure. To help application developers
+	// detect this error, panic on repeated reads to the failed connection.
+ c.readErrCount++ + if c.readErrCount >= 1000 { + panic("repeated read on failed websocket connection") + } + + return noFrame, nil, c.readErr +} + +type messageReader struct{ c *Conn } + +func (r *messageReader) Read(b []byte) (int, error) { + c := r.c + if c.messageReader != r { + return 0, io.EOF + } + + for c.readErr == nil { + + if c.readRemaining > 0 { + if int64(len(b)) > c.readRemaining { + b = b[:c.readRemaining] + } + n, err := c.br.Read(b) + c.readErr = hideTempErr(err) + if c.isServer { + c.readMaskPos = maskBytes(c.readMaskKey, c.readMaskPos, b[:n]) + } + c.readRemaining -= int64(n) + if c.readRemaining > 0 && c.readErr == io.EOF { + c.readErr = errUnexpectedEOF + } + return n, c.readErr + } + + if c.readFinal { + c.messageReader = nil + return 0, io.EOF + } + + frameType, err := c.advanceFrame() + switch { + case err != nil: + c.readErr = hideTempErr(err) + case frameType == TextMessage || frameType == BinaryMessage: + c.readErr = errors.New("websocket: internal error, unexpected text or binary in Reader") + } + } + + err := c.readErr + if err == io.EOF && c.messageReader == r { + err = errUnexpectedEOF + } + return 0, err +} + +func (r *messageReader) Close() error { + return nil +} + +// ReadMessage is a helper method for getting a reader using NextReader and +// reading from that reader to a buffer. +func (c *Conn) ReadMessage() (messageType int, p []byte, err error) { + var r io.Reader + messageType, r, err = c.NextReader() + if err != nil { + return messageType, nil, err + } + p, err = ioutil.ReadAll(r) + return messageType, p, err +} + +// SetReadDeadline sets the read deadline on the underlying network connection. +// After a read has timed out, the websocket connection state is corrupt and +// all future reads will return an error. A zero value for t means reads will +// not time out. +func (c *Conn) SetReadDeadline(t time.Time) error { + return c.conn.SetReadDeadline(t) +} + +// SetReadLimit sets the maximum size for a message read from the peer. If a +// message exceeds the limit, the connection sends a close frame to the peer +// and returns ErrReadLimit to the application. +func (c *Conn) SetReadLimit(limit int64) { + c.readLimit = limit +} + +// CloseHandler returns the current close handler +func (c *Conn) CloseHandler() func(code int, text string) error { + return c.handleClose +} + +// SetCloseHandler sets the handler for close messages received from the peer. +// The code argument to h is the received close code or CloseNoStatusReceived +// if the close message is empty. The default close handler sends a close frame +// back to the peer. +// +// The application must read the connection to process close messages as +// described in the section on Control Frames above. +// +// The connection read methods return a CloseError when a close frame is +// received. Most applications should handle close messages as part of their +// normal error handling. Applications should only set a close handler when the +// application must perform some action before sending a close frame back to +// the peer. 
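+//
+// For example, a handler that logs the peer's close code before echoing the
+// close frame back (an illustrative sketch):
+//
+//  conn.SetCloseHandler(func(code int, text string) error {
+//      log.Printf("peer closed connection: %d %q", code, text)
+//      message := websocket.FormatCloseMessage(code, "")
+//      return conn.WriteControl(websocket.CloseMessage, message,
+//          time.Now().Add(time.Second))
+//  })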
+func (c *Conn) SetCloseHandler(h func(code int, text string) error) {
+	if h == nil {
+		h = func(code int, text string) error {
+			message := []byte{}
+			if code != CloseNoStatusReceived {
+				message = FormatCloseMessage(code, "")
+			}
+			c.WriteControl(CloseMessage, message, time.Now().Add(writeWait))
+			return nil
+		}
+	}
+	c.handleClose = h
+}
+
+// PingHandler returns the current ping handler
+func (c *Conn) PingHandler() func(appData string) error {
+	return c.handlePing
+}
+
+// SetPingHandler sets the handler for ping messages received from the peer.
+// The appData argument to h is the PING frame application data. The default
+// ping handler sends a pong to the peer.
+//
+// The application must read the connection to process ping messages as
+// described in the section on Control Frames above.
+func (c *Conn) SetPingHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(message string) error {
+			err := c.WriteControl(PongMessage, []byte(message), time.Now().Add(writeWait))
+			if err == ErrCloseSent {
+				return nil
+			} else if e, ok := err.(net.Error); ok && e.Temporary() {
+				return nil
+			}
+			return err
+		}
+	}
+	c.handlePing = h
+}
+
+// PongHandler returns the current pong handler
+func (c *Conn) PongHandler() func(appData string) error {
+	return c.handlePong
+}
+
+// SetPongHandler sets the handler for pong messages received from the peer.
+// The appData argument to h is the PONG frame application data. The default
+// pong handler does nothing.
+//
+// The application must read the connection to process pong messages as
+// described in the section on Control Frames above.
+func (c *Conn) SetPongHandler(h func(appData string) error) {
+	if h == nil {
+		h = func(string) error { return nil }
+	}
+	c.handlePong = h
+}
+
+// UnderlyingConn returns the internal net.Conn. This can be used for further
+// modification of connection-specific flags.
+func (c *Conn) UnderlyingConn() net.Conn {
+	return c.conn
+}
+
+// EnableWriteCompression enables and disables write compression of
+// subsequent text and binary messages. This function is a noop if
+// compression was not negotiated with the peer.
+func (c *Conn) EnableWriteCompression(enable bool) {
+	c.enableWriteCompression = enable
+}
+
+// SetCompressionLevel sets the flate compression level for subsequent text and
+// binary messages. This function is a noop if compression was not negotiated
+// with the peer. See the compress/flate package for a description of
+// compression levels.
+func (c *Conn) SetCompressionLevel(level int) error {
+	if !isValidCompressionLevel(level) {
+		return errors.New("websocket: invalid compression level")
+	}
+	c.compressionLevel = level
+	return nil
+}
+
+// FormatCloseMessage formats closeCode and text as a WebSocket close message.
+func FormatCloseMessage(closeCode int, text string) []byte {
+	buf := make([]byte, 2+len(text))
+	binary.BigEndian.PutUint16(buf, uint16(closeCode))
+	copy(buf[2:], text)
+	return buf
+}
diff --git a/vendor/github.com/gorilla/websocket/conn_read.go b/vendor/github.com/gorilla/websocket/conn_read.go
new file mode 100644
index 0000000..1ea1505
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_read.go
@@ -0,0 +1,18 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build go1.5
+
+package websocket
+
+import "io"
+
+func (c *Conn) read(n int) ([]byte, error) {
+	p, err := c.br.Peek(n)
+	if err == io.EOF {
+		err = errUnexpectedEOF
+	}
+	c.br.Discard(len(p))
+	return p, err
+}
diff --git a/vendor/github.com/gorilla/websocket/conn_read_legacy.go b/vendor/github.com/gorilla/websocket/conn_read_legacy.go
new file mode 100644
index 0000000..018541c
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/conn_read_legacy.go
@@ -0,0 +1,21 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.5
+
+package websocket
+
+import "io"
+
+func (c *Conn) read(n int) ([]byte, error) {
+	p, err := c.br.Peek(n)
+	if err == io.EOF {
+		err = errUnexpectedEOF
+	}
+	if len(p) > 0 {
+		// advance over the bytes just read
+		io.ReadFull(c.br, p)
+	}
+	return p, err
+}
diff --git a/vendor/github.com/gorilla/websocket/doc.go b/vendor/github.com/gorilla/websocket/doc.go
new file mode 100644
index 0000000..f5ff082
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/doc.go
@@ -0,0 +1,179 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package websocket implements the WebSocket protocol defined in RFC 6455.
+//
+// Overview
+//
+// The Conn type represents a WebSocket connection. A server application calls
+// the Upgrader.Upgrade method from an HTTP request handler to get a *Conn:
+//
+//  var upgrader = websocket.Upgrader{
+//      ReadBufferSize:  1024,
+//      WriteBufferSize: 1024,
+//  }
+//
+//  func handler(w http.ResponseWriter, r *http.Request) {
+//      conn, err := upgrader.Upgrade(w, r, nil)
+//      if err != nil {
+//          log.Println(err)
+//          return
+//      }
+//      ... Use conn to send and receive messages.
+//  }
+//
+// Call the connection's WriteMessage and ReadMessage methods to send and
+// receive messages as a slice of bytes. This snippet of code shows how to echo
+// messages using these methods:
+//
+//  for {
+//      messageType, p, err := conn.ReadMessage()
+//      if err != nil {
+//          return
+//      }
+//      if err := conn.WriteMessage(messageType, p); err != nil {
+//          return
+//      }
+//  }
+//
+// In the snippet above, p is a []byte and messageType is an int with value
+// websocket.BinaryMessage or websocket.TextMessage.
+//
+// An application can also send and receive messages using the io.WriteCloser
+// and io.Reader interfaces. To send a message, call the connection NextWriter
+// method to get an io.WriteCloser, write the message to the writer and close
+// the writer when done. To receive a message, call the connection NextReader
+// method to get an io.Reader and read until io.EOF is returned. This snippet
+// shows how to echo messages using the NextWriter and NextReader methods:
+//
+//  for {
+//      messageType, r, err := conn.NextReader()
+//      if err != nil {
+//          return
+//      }
+//      w, err := conn.NextWriter(messageType)
+//      if err != nil {
+//          return
+//      }
+//      if _, err := io.Copy(w, r); err != nil {
+//          return
+//      }
+//      if err := w.Close(); err != nil {
+//          return
+//      }
+//  }
+//
+// Data Messages
+//
+// The WebSocket protocol distinguishes between text and binary data messages.
+// Text messages are interpreted as UTF-8 encoded text. The interpretation of
+// binary messages is left to the application.
+//
+// This package uses the TextMessage and BinaryMessage integer constants to
+// identify the two data message types. The ReadMessage and NextReader methods
+// return the type of the received message. The messageType argument to the
+// WriteMessage and NextWriter methods specifies the type of a sent message.
+//
+// It is the application's responsibility to ensure that text messages are
+// valid UTF-8 encoded text.
+//
+// Control Messages
+//
+// The WebSocket protocol defines three types of control messages: close, ping
+// and pong. Call the connection WriteControl, WriteMessage or NextWriter
+// methods to send a control message to the peer.
+//
+// Connections handle received close messages by sending a close message to the
+// peer and returning a *CloseError from the NextReader, ReadMessage or the
+// message Read method.
+//
+// Connections handle received ping and pong messages by invoking callback
+// functions set with SetPingHandler and SetPongHandler methods. The callback
+// functions are called from the NextReader, ReadMessage and the message Read
+// methods.
+//
+// The default ping handler sends a pong to the peer. The application's reading
+// goroutine can block for a short time while the handler writes the pong data
+// to the connection.
+//
+// The application must read the connection to process ping, pong and close
+// messages sent from the peer. If the application is not otherwise interested
+// in messages from the peer, then the application should start a goroutine to
+// read and discard messages from the peer. A simple example is:
+//
+//  func readLoop(c *websocket.Conn) {
+//      for {
+//          if _, _, err := c.NextReader(); err != nil {
+//              c.Close()
+//              break
+//          }
+//      }
+//  }
+//
+// Concurrency
+//
+// Connections support one concurrent reader and one concurrent writer.
+//
+// Applications are responsible for ensuring that no more than one goroutine
+// calls the write methods (NextWriter, SetWriteDeadline, WriteMessage,
+// WriteJSON, EnableWriteCompression, SetCompressionLevel) concurrently and
+// that no more than one goroutine calls the read methods (NextReader,
+// SetReadDeadline, ReadMessage, ReadJSON, SetPongHandler, SetPingHandler)
+// concurrently.
+//
+// The Close and WriteControl methods can be called concurrently with all other
+// methods.
+//
+// Origin Considerations
+//
+// Web browsers allow JavaScript applications to open a WebSocket connection to
+// any host. It's up to the server to enforce an origin policy using the Origin
+// request header sent by the browser.
+//
+// The Upgrader calls the function specified in the CheckOrigin field to check
+// the origin. If the CheckOrigin function returns false, then the Upgrade
+// method fails the WebSocket handshake with HTTP status 403.
+//
+// If the CheckOrigin field is nil, then the Upgrader uses a safe default: fail
+// the handshake if the Origin request header is present and not equal to the
+// Host request header.
+//
+// An application can allow connections from any origin by specifying a
+// function that always returns true:
+//
+//  var upgrader = websocket.Upgrader{
+//      CheckOrigin: func(r *http.Request) bool { return true },
+//  }
+//
+// The deprecated package-level Upgrade function does not perform origin
+// checking. The application is responsible for checking the Origin header
+// before calling the Upgrade function.
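+//
+// Applications that need a policy stricter than the default can compare the
+// Origin header against an explicit allow-list (a sketch; the origin shown
+// is illustrative):
+//
+//  var upgrader = websocket.Upgrader{
+//      CheckOrigin: func(r *http.Request) bool {
+//          return r.Header.Get("Origin") == "https://example.com"
+//      },
+//  }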
+// +// Compression EXPERIMENTAL +// +// Per message compression extensions (RFC 7692) are experimentally supported +// by this package in a limited capacity. Setting the EnableCompression option +// to true in Dialer or Upgrader will attempt to negotiate per message deflate +// support. +// +// var upgrader = websocket.Upgrader{ +// EnableCompression: true, +// } +// +// If compression was successfully negotiated with the connection's peer, any +// message received in compressed form will be automatically decompressed. +// All Read methods will return uncompressed bytes. +// +// Per message compression of messages written to a connection can be enabled +// or disabled by calling the corresponding Conn method: +// +// conn.EnableWriteCompression(false) +// +// Currently this package does not support compression with "context takeover". +// This means that messages must be compressed and decompressed in isolation, +// without retaining sliding window or dictionary state across messages. For +// more details refer to RFC 7692. +// +// Use of compression is experimental and may result in decreased performance. +package websocket diff --git a/vendor/github.com/gorilla/websocket/json.go b/vendor/github.com/gorilla/websocket/json.go new file mode 100644 index 0000000..dc2c1f6 --- /dev/null +++ b/vendor/github.com/gorilla/websocket/json.go @@ -0,0 +1,60 @@ +// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package websocket + +import ( + "encoding/json" + "io" +) + +// WriteJSON writes the JSON encoding of v as a message. +// +// Deprecated: Use c.WriteJSON instead. +func WriteJSON(c *Conn, v interface{}) error { + return c.WriteJSON(v) +} + +// WriteJSON writes the JSON encoding of v as a message. +// +// See the documentation for encoding/json Marshal for details about the +// conversion of Go values to JSON. +func (c *Conn) WriteJSON(v interface{}) error { + w, err := c.NextWriter(TextMessage) + if err != nil { + return err + } + err1 := json.NewEncoder(w).Encode(v) + err2 := w.Close() + if err1 != nil { + return err1 + } + return err2 +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// Deprecated: Use c.ReadJSON instead. +func ReadJSON(c *Conn, v interface{}) error { + return c.ReadJSON(v) +} + +// ReadJSON reads the next JSON-encoded message from the connection and stores +// it in the value pointed to by v. +// +// See the documentation for the encoding/json Unmarshal function for details +// about the conversion of JSON to a Go value. +func (c *Conn) ReadJSON(v interface{}) error { + _, r, err := c.NextReader() + if err != nil { + return err + } + err = json.NewDecoder(r).Decode(v) + if err == io.EOF { + // One value is expected in the message. + err = io.ErrUnexpectedEOF + } + return err +} diff --git a/vendor/github.com/gorilla/websocket/mask.go b/vendor/github.com/gorilla/websocket/mask.go new file mode 100644 index 0000000..6a88bbc --- /dev/null +++ b/vendor/github.com/gorilla/websocket/mask.go @@ -0,0 +1,55 @@ +// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of +// this source code is governed by a BSD-style license that can be found in the +// LICENSE file. 
+
+// +build !appengine
+
+package websocket
+
+import "unsafe"
+
+const wordSize = int(unsafe.Sizeof(uintptr(0)))
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+
+	// Mask one byte at a time for small buffers.
+	if len(b) < 2*wordSize {
+		for i := range b {
+			b[i] ^= key[pos&3]
+			pos++
+		}
+		return pos & 3
+	}
+
+	// Mask one byte at a time to word boundary.
+	if n := int(uintptr(unsafe.Pointer(&b[0]))) % wordSize; n != 0 {
+		n = wordSize - n
+		for i := range b[:n] {
+			b[i] ^= key[pos&3]
+			pos++
+		}
+		b = b[n:]
+	}
+
+	// Create aligned word size key.
+	var k [wordSize]byte
+	for i := range k {
+		k[i] = key[(pos+i)&3]
+	}
+	kw := *(*uintptr)(unsafe.Pointer(&k))
+
+	// Mask one word at a time.
+	n := (len(b) / wordSize) * wordSize
+	for i := 0; i < n; i += wordSize {
+		*(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&b[0])) + uintptr(i))) ^= kw
+	}
+
+	// Mask one byte at a time for remaining bytes.
+	b = b[n:]
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+
+	return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/mask_safe.go b/vendor/github.com/gorilla/websocket/mask_safe.go
new file mode 100644
index 0000000..2aac060
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/mask_safe.go
@@ -0,0 +1,15 @@
+// Copyright 2016 The Gorilla WebSocket Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found in the
+// LICENSE file.
+
+// +build appengine
+
+package websocket
+
+func maskBytes(key [4]byte, pos int, b []byte) int {
+	for i := range b {
+		b[i] ^= key[pos&3]
+		pos++
+	}
+	return pos & 3
+}
diff --git a/vendor/github.com/gorilla/websocket/prepared.go b/vendor/github.com/gorilla/websocket/prepared.go
new file mode 100644
index 0000000..1efffbd
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/prepared.go
@@ -0,0 +1,103 @@
+// Copyright 2017 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bytes"
+	"net"
+	"sync"
+	"time"
+)
+
+// PreparedMessage caches on-the-wire representations of a message payload.
+// Use PreparedMessage to efficiently send a message payload to multiple
+// connections. PreparedMessage is especially useful when compression is used
+// because the CPU and memory expensive compression operation can be executed
+// once for a given set of compression options.
+type PreparedMessage struct {
+	messageType int
+	data        []byte
+	err         error
+	mu          sync.Mutex
+	frames      map[prepareKey]*preparedFrame
+}
+
+// prepareKey defines a unique set of options to cache prepared frames in PreparedMessage.
+type prepareKey struct {
+	isServer         bool
+	compress         bool
+	compressionLevel int
+}
+
+// preparedFrame contains data in wire representation.
+type preparedFrame struct {
+	once sync.Once
+	data []byte
+}
+
+// NewPreparedMessage returns an initialized PreparedMessage. You can then send
+// it to a connection using the WritePreparedMessage method. A valid wire
+// representation will be calculated lazily, only once for a given set of
+// connection options.
+func NewPreparedMessage(messageType int, data []byte) (*PreparedMessage, error) {
+	pm := &PreparedMessage{
+		messageType: messageType,
+		frames:      make(map[prepareKey]*preparedFrame),
+		data:        data,
+	}
+
+	// Prepare a plain server frame.
+	_, frameData, err := pm.frame(prepareKey{isServer: true, compress: false})
+	if err != nil {
+		return nil, err
+	}
+
+	// To protect against caller modifying the data argument, remember the data
+	// copied to the plain server frame.
+	pm.data = frameData[len(frameData)-len(data):]
+	return pm, nil
+}
+
+func (pm *PreparedMessage) frame(key prepareKey) (int, []byte, error) {
+	pm.mu.Lock()
+	frame, ok := pm.frames[key]
+	if !ok {
+		frame = &preparedFrame{}
+		pm.frames[key] = frame
+	}
+	pm.mu.Unlock()
+
+	var err error
+	frame.once.Do(func() {
+		// Prepare a frame using a 'fake' connection.
+		// TODO: Refactor code in conn.go to allow more direct construction of
+		// the frame.
+		mu := make(chan bool, 1)
+		mu <- true
+		var nc prepareConn
+		c := &Conn{
+			conn:                   &nc,
+			mu:                     mu,
+			isServer:               key.isServer,
+			compressionLevel:       key.compressionLevel,
+			enableWriteCompression: true,
+			writeBuf:               make([]byte, defaultWriteBufferSize+maxFrameHeaderSize),
+		}
+		if key.compress {
+			c.newCompressionWriter = compressNoContextTakeover
+		}
+		err = c.WriteMessage(pm.messageType, pm.data)
+		frame.data = nc.buf.Bytes()
+	})
+	return pm.messageType, frame.data, err
+}
+
+type prepareConn struct {
+	buf bytes.Buffer
+	net.Conn
+}
+
+func (pc *prepareConn) Write(p []byte) (int, error)        { return pc.buf.Write(p) }
+func (pc *prepareConn) SetWriteDeadline(t time.Time) error { return nil }
diff --git a/vendor/github.com/gorilla/websocket/server.go b/vendor/github.com/gorilla/websocket/server.go
new file mode 100644
index 0000000..6ae97c5
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/server.go
@@ -0,0 +1,292 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"bufio"
+	"errors"
+	"net"
+	"net/http"
+	"net/url"
+	"strings"
+	"time"
+)
+
+// HandshakeError describes an error with the handshake from the peer.
+type HandshakeError struct {
+	message string
+}
+
+func (e HandshakeError) Error() string { return e.message }
+
+// Upgrader specifies parameters for upgrading an HTTP connection to a
+// WebSocket connection.
+type Upgrader struct {
+	// HandshakeTimeout specifies the duration for the handshake to complete.
+	HandshakeTimeout time.Duration
+
+	// ReadBufferSize and WriteBufferSize specify I/O buffer sizes. If a buffer
+	// size is zero, then buffers allocated by the HTTP server are used. The
+	// I/O buffer sizes do not limit the size of the messages that can be sent
+	// or received.
+	ReadBufferSize, WriteBufferSize int
+
+	// Subprotocols specifies the server's supported protocols in order of
+	// preference. If this field is set, then the Upgrade method negotiates a
+	// subprotocol by selecting the first match in this list with a protocol
+	// requested by the client.
+	Subprotocols []string
+
+	// Error specifies the function for generating HTTP error responses. If Error
+	// is nil, then http.Error is used to generate the HTTP response.
+	Error func(w http.ResponseWriter, r *http.Request, status int, reason error)
+
+	// CheckOrigin returns true if the request Origin header is acceptable. If
+	// CheckOrigin is nil, the host in the Origin header must not be set or
+	// must match the host of the request.
+	CheckOrigin func(r *http.Request) bool
+
+	// EnableCompression specifies whether the server should attempt to
+	// negotiate per message compression (RFC 7692).
Setting this value to true does not + // guarantee that compression will be supported. Currently only "no context + // takeover" modes are supported. + EnableCompression bool +} + +func (u *Upgrader) returnError(w http.ResponseWriter, r *http.Request, status int, reason string) (*Conn, error) { + err := HandshakeError{reason} + if u.Error != nil { + u.Error(w, r, status, err) + } else { + w.Header().Set("Sec-Websocket-Version", "13") + http.Error(w, http.StatusText(status), status) + } + return nil, err +} + +// checkSameOrigin returns true if the origin is not set or is equal to the request host. +func checkSameOrigin(r *http.Request) bool { + origin := r.Header["Origin"] + if len(origin) == 0 { + return true + } + u, err := url.Parse(origin[0]) + if err != nil { + return false + } + return u.Host == r.Host +} + +func (u *Upgrader) selectSubprotocol(r *http.Request, responseHeader http.Header) string { + if u.Subprotocols != nil { + clientProtocols := Subprotocols(r) + for _, serverProtocol := range u.Subprotocols { + for _, clientProtocol := range clientProtocols { + if clientProtocol == serverProtocol { + return clientProtocol + } + } + } + } else if responseHeader != nil { + return responseHeader.Get("Sec-Websocket-Protocol") + } + return "" +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// application negotiated subprotocol (Sec-Websocket-Protocol). +// +// If the upgrade fails, then Upgrade replies to the client with an HTTP error +// response. +func (u *Upgrader) Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header) (*Conn, error) { + if r.Method != "GET" { + return u.returnError(w, r, http.StatusMethodNotAllowed, "websocket: not a websocket handshake: request method is not GET") + } + + if _, ok := responseHeader["Sec-Websocket-Extensions"]; ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: application specific 'Sec-Websocket-Extensions' headers are unsupported") + } + + if !tokenListContainsValue(r.Header, "Connection", "upgrade") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'upgrade' token not found in 'Connection' header") + } + + if !tokenListContainsValue(r.Header, "Upgrade", "websocket") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: 'websocket' token not found in 'Upgrade' header") + } + + if !tokenListContainsValue(r.Header, "Sec-Websocket-Version", "13") { + return u.returnError(w, r, http.StatusBadRequest, "websocket: unsupported version: 13 not found in 'Sec-Websocket-Version' header") + } + + checkOrigin := u.CheckOrigin + if checkOrigin == nil { + checkOrigin = checkSameOrigin + } + if !checkOrigin(r) { + return u.returnError(w, r, http.StatusForbidden, "websocket: 'Origin' header value not allowed") + } + + challengeKey := r.Header.Get("Sec-Websocket-Key") + if challengeKey == "" { + return u.returnError(w, r, http.StatusBadRequest, "websocket: not a websocket handshake: `Sec-Websocket-Key' header is missing or blank") + } + + subprotocol := u.selectSubprotocol(r, responseHeader) + + // Negotiate PMCE + var compress bool + if u.EnableCompression { + for _, ext := range parseExtensions(r.Header) { + if ext[""] != "permessage-deflate" { + continue + } + compress = true + break + } + } + + var ( + netConn net.Conn + err error + ) + + h, ok := 
w.(http.Hijacker) + if !ok { + return u.returnError(w, r, http.StatusInternalServerError, "websocket: response does not implement http.Hijacker") + } + var brw *bufio.ReadWriter + netConn, brw, err = h.Hijack() + if err != nil { + return u.returnError(w, r, http.StatusInternalServerError, err.Error()) + } + + if brw.Reader.Buffered() > 0 { + netConn.Close() + return nil, errors.New("websocket: client sent data before handshake is complete") + } + + c := newConnBRW(netConn, true, u.ReadBufferSize, u.WriteBufferSize, brw) + c.subprotocol = subprotocol + + if compress { + c.newCompressionWriter = compressNoContextTakeover + c.newDecompressionReader = decompressNoContextTakeover + } + + p := c.writeBuf[:0] + p = append(p, "HTTP/1.1 101 Switching Protocols\r\nUpgrade: websocket\r\nConnection: Upgrade\r\nSec-WebSocket-Accept: "...) + p = append(p, computeAcceptKey(challengeKey)...) + p = append(p, "\r\n"...) + if c.subprotocol != "" { + p = append(p, "Sec-Websocket-Protocol: "...) + p = append(p, c.subprotocol...) + p = append(p, "\r\n"...) + } + if compress { + p = append(p, "Sec-Websocket-Extensions: permessage-deflate; server_no_context_takeover; client_no_context_takeover\r\n"...) + } + for k, vs := range responseHeader { + if k == "Sec-Websocket-Protocol" { + continue + } + for _, v := range vs { + p = append(p, k...) + p = append(p, ": "...) + for i := 0; i < len(v); i++ { + b := v[i] + if b <= 31 { + // prevent response splitting. + b = ' ' + } + p = append(p, b) + } + p = append(p, "\r\n"...) + } + } + p = append(p, "\r\n"...) + + // Clear deadlines set by HTTP server. + netConn.SetDeadline(time.Time{}) + + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Now().Add(u.HandshakeTimeout)) + } + if _, err = netConn.Write(p); err != nil { + netConn.Close() + return nil, err + } + if u.HandshakeTimeout > 0 { + netConn.SetWriteDeadline(time.Time{}) + } + + return c, nil +} + +// Upgrade upgrades the HTTP server connection to the WebSocket protocol. +// +// Deprecated: Use websocket.Upgrader instead. +// +// Upgrade does not perform origin checking. The application is responsible for +// checking the Origin header before calling Upgrade. An example implementation +// of the same origin policy check is: +// +// if req.Header.Get("Origin") != "http://"+req.Host { +// http.Error(w, "Origin not allowed", 403) +// return +// } +// +// If the endpoint supports subprotocols, then the application is responsible +// for negotiating the protocol used on the connection. Use the Subprotocols() +// function to get the subprotocols requested by the client. Use the +// Sec-Websocket-Protocol response header to specify the subprotocol selected +// by the application. +// +// The responseHeader is included in the response to the client's upgrade +// request. Use the responseHeader to specify cookies (Set-Cookie) and the +// negotiated subprotocol (Sec-Websocket-Protocol). +// +// The connection buffers IO to the underlying network connection. The +// readBufSize and writeBufSize parameters specify the size of the buffers to +// use. Messages can be larger than the buffers. +// +// If the request is not a valid WebSocket handshake, then Upgrade returns an +// error of type HandshakeError. Applications should handle this error by +// replying to the client with an HTTP error response. 
+func Upgrade(w http.ResponseWriter, r *http.Request, responseHeader http.Header, readBufSize, writeBufSize int) (*Conn, error) {
+	u := Upgrader{ReadBufferSize: readBufSize, WriteBufferSize: writeBufSize}
+	u.Error = func(w http.ResponseWriter, r *http.Request, status int, reason error) {
+		// don't return errors to maintain backwards compatibility
+	}
+	u.CheckOrigin = func(r *http.Request) bool {
+		// allow all connections by default
+		return true
+	}
+	return u.Upgrade(w, r, responseHeader)
+}
+
+// Subprotocols returns the subprotocols requested by the client in the
+// Sec-Websocket-Protocol header.
+func Subprotocols(r *http.Request) []string {
+	h := strings.TrimSpace(r.Header.Get("Sec-Websocket-Protocol"))
+	if h == "" {
+		return nil
+	}
+	protocols := strings.Split(h, ",")
+	for i := range protocols {
+		protocols[i] = strings.TrimSpace(protocols[i])
+	}
+	return protocols
+}
+
+// IsWebSocketUpgrade returns true if the client requested upgrade to the
+// WebSocket protocol.
+func IsWebSocketUpgrade(r *http.Request) bool {
+	return tokenListContainsValue(r.Header, "Connection", "upgrade") &&
+		tokenListContainsValue(r.Header, "Upgrade", "websocket")
+}
diff --git a/vendor/github.com/gorilla/websocket/util.go b/vendor/github.com/gorilla/websocket/util.go
new file mode 100644
index 0000000..262e647
--- /dev/null
+++ b/vendor/github.com/gorilla/websocket/util.go
@@ -0,0 +1,214 @@
+// Copyright 2013 The Gorilla WebSocket Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package websocket
+
+import (
+	"crypto/rand"
+	"crypto/sha1"
+	"encoding/base64"
+	"io"
+	"net/http"
+	"strings"
+)
+
+var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
+
+func computeAcceptKey(challengeKey string) string {
+	h := sha1.New()
+	h.Write([]byte(challengeKey))
+	h.Write(keyGUID)
+	return base64.StdEncoding.EncodeToString(h.Sum(nil))
+}
+
+func generateChallengeKey() (string, error) {
+	p := make([]byte, 16)
+	if _, err := io.ReadFull(rand.Reader, p); err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(p), nil
+}
+
+// Octet types from RFC 2616.
+var octetTypes [256]byte
+
+const (
+	isTokenOctet = 1 << iota
+	isSpaceOctet
+)
+
+func init() {
+	// From RFC 2616
+	//
+	// OCTET      = <any 8-bit sequence of data>
+	// CHAR       = <any US-ASCII character (octets 0 - 127)>
+	// CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+	// CR         = <US-ASCII CR, carriage return (13)>
+	// LF         = <US-ASCII LF, linefeed (10)>
+	// SP         = <US-ASCII SP, space (32)>
+	// HT         = <US-ASCII HT, horizontal-tab (9)>
+	// <">        = <US-ASCII double-quote mark (34)>
+	// CRLF       = CR LF
+	// LWS        = [CRLF] 1*( SP | HT )
+	// TEXT       = <any OCTET except CTLs, but including LWS>
+	// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+	//              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+	// token      = 1*<any CHAR except CTLs or separators>
+	// qdtext     = <any TEXT except <">>
+
+	for c := 0; c < 256; c++ {
+		var t byte
+		isCtl := c <= 31 || c == 127
+		isChar := 0 <= c && c <= 127
+		isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+		if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+			t |= isSpaceOctet
+		}
+		if isChar && !isCtl && !isSeparator {
+			t |= isTokenOctet
+		}
+		octetTypes[c] = t
+	}
+}
+
+func skipSpace(s string) (rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isSpaceOctet == 0 {
+			break
+		}
+	}
+	return s[i:]
+}
+
+func nextToken(s string) (token, rest string) {
+	i := 0
+	for ; i < len(s); i++ {
+		if octetTypes[s[i]]&isTokenOctet == 0 {
+			break
+		}
+	}
+	return s[:i], s[i:]
+}
+
+func nextTokenOrQuoted(s string) (value string, rest string) {
+	if !strings.HasPrefix(s, "\"") {
+		return nextToken(s)
+	}
+	s = s[1:]
+	for i := 0; i < len(s); i++ {
+		switch s[i] {
+		case '"':
+			return s[:i], s[i+1:]
+		case '\\':
+			p := make([]byte, len(s)-1)
+			j := copy(p, s[:i])
+			escape := true
+			for i = i + 1; i < len(s); i++ {
+				b := s[i]
+				switch {
+				case escape:
+					escape = false
+					p[j] = b
+					j++
+				case b == '\\':
+					escape = true
+				case b == '"':
+					return string(p[:j]), s[i+1:]
+				default:
+					p[j] = b
+					j++
+				}
+			}
+			return "", ""
+		}
+	}
+	return "", ""
+}
+
+// tokenListContainsValue returns true if the 1#token header with the given
+// name contains token.
+func tokenListContainsValue(header http.Header, name string, value string) bool {
+headers:
+	for _, s := range header[name] {
+		for {
+			var t string
+			t, s = nextToken(skipSpace(s))
+			if t == "" {
+				continue headers
+			}
+			s = skipSpace(s)
+			if s != "" && s[0] != ',' {
+				continue headers
+			}
+			if strings.EqualFold(t, value) {
+				return true
+			}
+			if s == "" {
+				continue headers
+			}
+			s = s[1:]
+		}
+	}
+	return false
+}
+
+// parseExtensions parses WebSocket extensions from a header.
+func parseExtensions(header http.Header) []map[string]string {
+
+	// From RFC 6455:
+	//
+	//  Sec-WebSocket-Extensions = extension-list
+	//  extension-list = 1#extension
+	//  extension = extension-token *( ";" extension-param )
+	//  extension-token = registered-token
+	//  registered-token = token
+	//  extension-param = token [ "=" (token | quoted-string) ]
+	//     ;When using the quoted-string syntax variant, the value
+	//     ;after quoted-string unescaping MUST conform to the
+	//     ;'token' ABNF.
+
+	var result []map[string]string
+headers:
+	for _, s := range header["Sec-Websocket-Extensions"] {
+		for {
+			var t string
+			t, s = nextToken(skipSpace(s))
+			if t == "" {
+				continue headers
+			}
+			ext := map[string]string{"": t}
+			for {
+				s = skipSpace(s)
+				if !strings.HasPrefix(s, ";") {
+					break
+				}
+				var k string
+				k, s = nextToken(skipSpace(s[1:]))
+				if k == "" {
+					continue headers
+				}
+				s = skipSpace(s)
+				var v string
+				if strings.HasPrefix(s, "=") {
+					v, s = nextTokenOrQuoted(skipSpace(s[1:]))
+					s = skipSpace(s)
+				}
+				if s != "" && s[0] != ',' && s[0] != ';' {
+					continue headers
+				}
+				ext[k] = v
+			}
+			if s != "" && s[0] != ',' {
+				continue headers
+			}
+			result = append(result, ext)
+			if s == "" {
+				continue headers
+			}
+			s = s[1:]
+		}
+	}
+	return result
+}
diff --git a/vendor/github.com/joho/godotenv/autoload/autoload.go b/vendor/github.com/joho/godotenv/autoload/autoload.go
new file mode 100644
index 0000000..fbcd2bd
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/autoload/autoload.go
@@ -0,0 +1,15 @@
+package autoload
+
+/*
+	You can read the .env file on import just by doing
+
+		import _ "github.com/joho/godotenv/autoload"
+
+	And bob's your mother's brother
+*/
+
+import "github.com/joho/godotenv"
+
+func init() {
+	godotenv.Load()
+}
diff --git a/vendor/github.com/joho/godotenv/godotenv.go b/vendor/github.com/joho/godotenv/godotenv.go
new file mode 100644
index 0000000..f44a58d
--- /dev/null
+++ b/vendor/github.com/joho/godotenv/godotenv.go
@@ -0,0 +1,253 @@
+// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
+//
+// Examples/readme can be found on the github page at https://github.com/joho/godotenv
+//
+// The TL;DR is that you make a .env file that looks something like
+//
+//  SOME_ENV_VAR=somevalue
+//
+// and then in your go code you can call
+//
+//  godotenv.Load()
+//
+// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
+package godotenv
+
+import (
+	"bufio"
+	"errors"
+	"io"
+	"os"
+	"os/exec"
+	"strings"
+)
+
+// Load will read your env file(s) and load them into ENV for this process.
+//
+// Call this function as close as possible to the start of your program (ideally in main)
+//
+// If you call Load without any args it will default to loading .env in the current path
+//
+// You can otherwise tell it which files to load (there can be more than one) like
+//
+//  godotenv.Load("fileone", "filetwo")
+//
+// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults
+func Load(filenames ...string) (err error) {
+	filenames = filenamesOrDefault(filenames)
+
+	for _, filename := range filenames {
+		err = loadFile(filename, false)
+		if err != nil {
+			return // return early on a spazout
+		}
+	}
+	return
+}
+
+// Overload will read your env file(s) and load them into ENV for this process.
+//
+// Call this function as close as possible to the start of your program (ideally in main)
+//
+// If you call Overload without any args it will default to loading .env in the current path
+//
+// You can otherwise tell it which files to load (there can be more than one) like
+//
+//  godotenv.Overload("fileone", "filetwo")
+//
+// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars.
+func Overload(filenames ...string) (err error) {
+	filenames = filenamesOrDefault(filenames)
+
+	for _, filename := range filenames {
+		err = loadFile(filename, true)
+		if err != nil {
+			return // return early on a spazout
+		}
+	}
+	return
+}
+
+// Read all env (with same file loading semantics as Load) but return values as
+// a map rather than automatically writing values into env
+func Read(filenames ...string) (envMap map[string]string, err error) {
+	filenames = filenamesOrDefault(filenames)
+	envMap = make(map[string]string)
+
+	for _, filename := range filenames {
+		individualEnvMap, individualErr := readFile(filename)
+
+		if individualErr != nil {
+			err = individualErr
+			return // return early on a spazout
+		}
+
+		for key, value := range individualEnvMap {
+			envMap[key] = value
+		}
+	}
+
+	return
+}
+
+// Parse reads an env file from io.Reader, returning a map of keys and values.
+func Parse(r io.Reader) (envMap map[string]string, err error) {
+	envMap = make(map[string]string)
+
+	var lines []string
+	scanner := bufio.NewScanner(r)
+	for scanner.Scan() {
+		lines = append(lines, scanner.Text())
+	}
+
+	if err = scanner.Err(); err != nil {
+		return
+	}
+
+	for _, fullLine := range lines {
+		if !isIgnoredLine(fullLine) {
+			var key, value string
+			key, value, err = parseLine(fullLine)
+
+			if err != nil {
+				return
+			}
+			envMap[key] = value
+		}
+	}
+	return
+}
+
+// Exec loads env vars from the specified filenames (an empty list falls back
+// to the default, .env) then executes the cmd specified.
+//
+// Simply hooks up os.Stdin/err/out to the command and calls Run()
+//
+// If you want more fine grained control over your command it's recommended
+// that you use `Load()` or `Read()` and the `os/exec` package yourself.
+func Exec(filenames []string, cmd string, cmdArgs []string) error {
+	Load(filenames...)
+
+	command := exec.Command(cmd, cmdArgs...)
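+	// Wire the child process to this process's standard streams so its
+	// input and output pass through unchanged.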
+ command.Stdin = os.Stdin + command.Stdout = os.Stdout + command.Stderr = os.Stderr + return command.Run() +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := readFile(filename) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + os.Setenv(key, value) + } + } + + return nil +} + +func readFile(filename string) (envMap map[string]string, err error) { + file, err := os.Open(filename) + if err != nil { + return + } + defer file.Close() + + return Parse(file) +} + +func parseLine(line string) (key string, value string, err error) { + if len(line) == 0 { + err = errors.New("zero length string") + return + } + + // ditch the comments (but keep quoted hashes) + if strings.Contains(line, "#") { + segmentsBetweenHashes := strings.Split(line, "#") + quotesAreOpen := false + var segmentsToKeep []string + for _, segment := range segmentsBetweenHashes { + if strings.Count(segment, "\"") == 1 || strings.Count(segment, "'") == 1 { + if quotesAreOpen { + quotesAreOpen = false + segmentsToKeep = append(segmentsToKeep, segment) + } else { + quotesAreOpen = true + } + } + + if len(segmentsToKeep) == 0 || quotesAreOpen { + segmentsToKeep = append(segmentsToKeep, segment) + } + } + + line = strings.Join(segmentsToKeep, "#") + } + + // now split key from value + splitString := strings.SplitN(line, "=", 2) + + if len(splitString) != 2 { + // try yaml mode! + splitString = strings.SplitN(line, ":", 2) + } + + if len(splitString) != 2 { + err = errors.New("Can't separate key from value") + return + } + + // Parse the key + key = splitString[0] + if strings.HasPrefix(key, "export") { + key = strings.TrimPrefix(key, "export") + } + key = strings.Trim(key, " ") + + // Parse the value + value = splitString[1] + + // trim + value = strings.Trim(value, " ") + + // check if we've got quoted values + if value != "" { + first := string(value[0:1]) + last := string(value[len(value)-1:]) + if first == last && strings.ContainsAny(first, `"'`) { + // pull the quotes off the edges + value = strings.Trim(value, `"'`) + + // expand quotes + value = strings.Replace(value, `\"`, `"`, -1) + // expand newlines + value = strings.Replace(value, `\n`, "\n", -1) + } + } + + return +} + +func isIgnoredLine(line string) bool { + trimmedLine := strings.Trim(line, " \n\t") + return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#") +} diff --git a/vendor/github.com/justinian/dice/dice.go b/vendor/github.com/justinian/dice/dice.go new file mode 100644 index 0000000..d93ff84 --- /dev/null +++ b/vendor/github.com/justinian/dice/dice.go @@ -0,0 +1,55 @@ +package dice + +import ( + "errors" + "fmt" + "regexp" + "strings" +) + +type RollResult interface { + fmt.Stringer + Description() string + Int() int +} + +type roller interface { + Pattern() *regexp.Regexp + Roll([]string) (RollResult, error) +} + +type basicRollResult struct { + desc string +} + +func (r basicRollResult) Description() string { return strings.Trim(r.desc, " \t\r\n") } + +var rollHandlers []roller + +func addRollHandler(handler roller) { + rollHandlers = append(rollHandlers, handler) +} + +/* + open - regexp.MustCompile(`([0-9]+)d([0-9]+)(e)?o$`) + sil - 
regexp.MustCompile(`([0-9]+)d([0-9]+)(e)?s$`) +*/ + +func Roll(desc string) (RollResult, string, error) { + for _, rollHandler := range rollHandlers { + rollHandler.Pattern().Longest() + + if r := rollHandler.Pattern().FindStringSubmatch(desc); r != nil { + result, err := rollHandler.Roll(r) + if err != nil { + return nil, "", err + } + + indexes := rollHandler.Pattern().FindStringSubmatchIndex(desc) + reason := strings.Trim(desc[indexes[0]+len(r[0]):], " \t\r\n") + return result, reason, nil + } + } + + return nil, "", errors.New("Bad roll format: " + desc) +} diff --git a/vendor/github.com/justinian/dice/eote.go b/vendor/github.com/justinian/dice/eote.go new file mode 100644 index 0000000..d941df8 --- /dev/null +++ b/vendor/github.com/justinian/dice/eote.go @@ -0,0 +1,168 @@ +package dice + +import ( + "fmt" + "math/rand" + "regexp" + "strconv" + "strings" +) + +type EoteDie struct { + Type string + S int // success + A int // advantage + T int // triumph + D int // despair + F int // force +} + +func (d *EoteDie) Add(o EoteDie) { + d.S += o.S + d.A += o.A + d.T += o.T + d.D += o.D + d.F += o.F +} + +func (d EoteDie) String() string { + results := make([]string, 0, 3) + + if d.S > 0 { + results = append(results, strings.Repeat("s", d.S)) + } else if d.S < 0 { + results = append(results, strings.Repeat("f", -d.S)) + } + + if d.A > 0 { + results = append(results, strings.Repeat("a", d.A)) + } else if d.A < 0 { + results = append(results, strings.Repeat("d", -d.A)) + } + + if d.F > 0 { + results = append(results, strings.Repeat("L", d.F)) + } else if d.F < 0 { + results = append(results, strings.Repeat("D", -d.F)) + } + + if d.T > 0 { + results = append(results, strings.Repeat("T", d.T)) + } + + if d.D > 0 { + results = append(results, strings.Repeat("D", d.D)) + } + + return fmt.Sprintf("%s[%s]", d.Type, strings.Join(results, "")) +} + +type EoteResult struct { + basicRollResult + EoteDie + Rolls []EoteDie +} + +func (r EoteResult) String() string { + parts := make([]string, 0, 6) + + if r.S > 0 { + parts = append(parts, fmt.Sprintf("%d success", r.S)) + } else if r.S < 0 { + parts = append(parts, fmt.Sprintf("%d failure", -r.S)) + } + + if r.A > 0 { + parts = append(parts, fmt.Sprintf("%d advantage", r.A)) + } else if r.A < 0 { + parts = append(parts, fmt.Sprintf("%d disadvantage", -r.A)) + } + + if r.T > 0 { + parts = append(parts, fmt.Sprintf("(%d triumph)", r.T)) + } + + if r.D > 0 { + parts = append(parts, fmt.Sprintf("(%d despair)", r.D)) + } + + if r.F > 0 { + parts = append(parts, fmt.Sprintf("%d light", r.F)) + } else if r.F < 0 { + parts = append(parts, fmt.Sprintf("%d dark", -r.F)) + } + + if len(parts) < 1 { + parts = append(parts, "no result") + } + + rolls := make([]string, len(r.Rolls)) + for i := range r.Rolls { + rolls[i] = r.Rolls[i].String() + } + parts = append(parts, fmt.Sprintf("\n%s", strings.Join(rolls, " "))) + + return strings.Join(parts, " ") +} + +func (r EoteResult) Int() int { + return r.S +} + +var eoteDice = map[string][]EoteDie{ + "b": {{}, {}, {A: 1}, {A: 2}, {S: 1}, {S: 1, A: 1}}, + "blk": {{}, {}, {A: -1}, {A: -1}, {S: -1}, {S: -1}}, + "g": {{}, {A: 1}, {A: 1}, {A: 2}, {S: 1}, {S: 1}, {S: 1, A: 1}, {S: 2}}, + "p": {{}, {A: -1}, {A: -1}, {A: -1}, {A: -2}, {S: -1}, {S: -1, A: -1}, {S: -2}}, + "y": {{}, {A: 1}, {A: 2}, {A: 2}, {S: 1}, {S: 1}, {S: 1, A: 1}, {S: 1, A: 1}, {S: 1, A: 1}, {S: 2}, {S: 2}, {S: 1, T: 1}}, + "r": {{}, {A: -1}, {A: -1}, {A: -2}, {A: -2}, {S: -1}, {S: -1}, {S: -1, A: -1}, {S: -1, A: -1}, {S: -2}, {S: -2}, {S: -1, D: 1}}, + "w": {{F: 
-1}, {F: -1}, {F: -1}, {F: -1}, {F: -1}, {F: -1}, {F: -2}, {F: 1}, {F: 1}, {F: 2}, {F: 2}, {F: 2}}, +} + +type EoteRoller struct{} + +var eotePattern = regexp.MustCompile(`([0-9]+(?:r|b|blk|p|g|y|w)\s*)+($|\s)`) +var diePattern = regexp.MustCompile(`([0-9]+)(r|b|blk|p|g|y|w)`) + +func (EoteRoller) Pattern() *regexp.Regexp { return eotePattern } + +func (EoteRoller) Roll(matches []string) (RollResult, error) { + diePattern.Longest() + + res := EoteResult{basicRollResult: basicRollResult{matches[0]}} + + for _, die := range strings.Split(matches[0], " ") { + parts := diePattern.FindStringSubmatch(strings.Trim(die, " \t\r\n")) + if parts == nil { + continue + } + + num, err := strconv.ParseInt(parts[1], 10, 0) + if err != nil { + continue + } + + choices, ok := eoteDice[parts[2]] + if !ok { + continue + } + + for i := int64(0); i < num; i++ { + die := choices[rand.Intn(len(choices))] + res.Add(die) + res.Rolls = append(res.Rolls, die) + } + } + + return res, nil +} + +func init() { + for name := range eoteDice { + for i := range eoteDice[name] { + eoteDice[name][i].Type = name + } + } + + addRollHandler(EoteRoller{}) +} diff --git a/vendor/github.com/justinian/dice/fudge.go b/vendor/github.com/justinian/dice/fudge.go new file mode 100644 index 0000000..a0a872c --- /dev/null +++ b/vendor/github.com/justinian/dice/fudge.go @@ -0,0 +1,63 @@ +package dice + +import ( + "fmt" + "math/rand" + "regexp" + "sort" + "strconv" +) + +type FudgeRoller struct{} + +var fudgePattern = regexp.MustCompile(`([0-9]+)df?([+-][0-9]+)?($|\s)`) + +func (FudgeRoller) Pattern() *regexp.Regexp { return fudgePattern } + +type FudgeResult struct { + basicRollResult + Rolls []int + Total int +} + +func (r FudgeResult) String() string { + return fmt.Sprintf("%d %v", r.Total, r.Rolls) +} + +func (r FudgeResult) Int() int { + return r.Total +} + +func (FudgeRoller) Roll(matches []string) (RollResult, error) { + dice, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return nil, err + } + + result := FudgeResult{ + basicRollResult: basicRollResult{matches[0]}, + Rolls: make([]int, dice), + Total: 0, + } + + if matches[2] != "" { + bonus, err := strconv.ParseInt(matches[2], 10, 0) + if err != nil { + return nil, err + } + result.Total += int(bonus) + } + + for i := 0; i < len(result.Rolls); i++ { + result.Rolls[i] = rand.Intn(3) - 1 + result.Total += result.Rolls[i] + } + + sort.Ints(result.Rolls) + + return result, nil +} + +func init() { + addRollHandler(FudgeRoller{}) +} diff --git a/vendor/github.com/justinian/dice/std.go b/vendor/github.com/justinian/dice/std.go new file mode 100644 index 0000000..68f3d72 --- /dev/null +++ b/vendor/github.com/justinian/dice/std.go @@ -0,0 +1,105 @@ +package dice + +import ( + "fmt" + "math/rand" + "regexp" + "sort" + "strconv" +) + +type StdRoller struct{} + +var stdPattern = regexp.MustCompile(`([0-9]+)d([0-9]+)((k|d|kh|dl|kl|dh)([0-9]+))?([+-][0-9]+)?($|\s)`) + +func (StdRoller) Pattern() *regexp.Regexp { return stdPattern } + +type StdResult struct { + basicRollResult + Rolls []int + Dropped []int + Total int +} + +func (r StdResult) String() string { + return fmt.Sprintf("%d %v (%v)", r.Total, r.Rolls, r.Dropped) +} + +func (r StdResult) Int() int { + return r.Total +} + +func (StdRoller) Roll(matches []string) (RollResult, error) { + dice, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return nil, err + } + + sides, err := strconv.ParseInt(matches[2], 10, 0) + if err != nil { + return nil, err + } + + keep := "" + num := 0 + if matches[4] != "" { + 
number, err := strconv.ParseInt(matches[5], 10, 0) + if err != nil { + return nil, err + } + num = int(number) + keep = matches[4] + } + + result := StdResult{ + basicRollResult: basicRollResult{matches[0]}, + Rolls: make([]int, dice), + Dropped: nil, + Total: 0, + } + + if matches[6] != "" { + bonus, err := strconv.ParseInt(matches[6], 10, 0) + if err != nil { + return nil, err + } + result.Total += int(bonus) + } + + for i := 0; i < len(result.Rolls); i++ { + roll := rand.Intn(int(sides)) + 1 + result.Rolls[i] = roll + } + + sort.Ints(result.Rolls) + size := len(result.Rolls) + + switch keep { + case "k": + fallthrough + case "kh": + result.Dropped = result.Rolls[:size-num] + result.Rolls = result.Rolls[size-num:] + case "d": + fallthrough + case "dl": + result.Dropped = result.Rolls[:num] + result.Rolls = result.Rolls[num:] + case "kl": + result.Dropped = result.Rolls[num:] + result.Rolls = result.Rolls[:num] + case "dh": + result.Dropped = result.Rolls[size-num:] + result.Rolls = result.Rolls[:size-num] + } + + for i := 0; i < len(result.Rolls); i++ { + result.Total += result.Rolls[i] + } + + return result, nil +} + +func init() { + addRollHandler(StdRoller{}) +} diff --git a/vendor/github.com/justinian/dice/versus.go b/vendor/github.com/justinian/dice/versus.go new file mode 100644 index 0000000..f6f3c1c --- /dev/null +++ b/vendor/github.com/justinian/dice/versus.go @@ -0,0 +1,92 @@ +package dice + +import ( + "errors" + "fmt" + "math/rand" + "regexp" + "strconv" +) + +type VsRoller struct{} + +var vsPattern = regexp.MustCompile(`([0-9]+)d([0-9]+)(e|r)?v([0-9]+)($|\s)`) + +func (VsRoller) Pattern() *regexp.Regexp { return vsPattern } + +type VsResult struct { + basicRollResult + Rolls []int + Successes int +} + +func (r VsResult) Description() string { return r.desc } + +func (r VsResult) String() string { + return fmt.Sprintf("%d %v", r.Successes, r.Rolls) +} + +func (r VsResult) Int() int { + return r.Successes +} + +func (VsRoller) Roll(matches []string) (RollResult, error) { + dice, err := strconv.ParseInt(matches[1], 10, 0) + if err != nil { + return nil, err + } + if dice < 1 { + return nil, errors.New("Count must be 1 or more") + } + + sides, err := strconv.ParseInt(matches[2], 10, 0) + if err != nil { + return nil, err + } + if sides < 2 { + return nil, errors.New("Sides must be 2 or more") + } + + explode := matches[3] == "e" + reroll := matches[3] == "r" + + target, err := strconv.ParseInt(matches[4], 10, 0) + if err != nil { + return nil, err + } + + result := VsResult{ + basicRollResult: basicRollResult{matches[0]}, + Rolls: make([]int, 0, dice), + Successes: 0, + } + + for i := int64(0); i < dice; i++ { + roll := rand.Intn(int(sides)) + 1 + + if roll == int(sides) && explode { + total := roll + for roll == int(sides) { + roll = rand.Intn(int(sides)) + 1 + total += roll + } + roll = total + } + + if roll == int(sides) && reroll { + i-- + } + + if roll >= int(target) { + result.Successes += 1 + } + + result.Rolls = append(result.Rolls, roll) + } + + return result, nil +} + +func init() { + addRollHandler(VsRoller{}) +} diff --git a/vendor/github.com/kardianos/osext/osext.go b/vendor/github.com/kardianos/osext/osext.go new file mode 100644 index 0000000..17f380f --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext.go @@ -0,0 +1,33 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Extensions to the standard "os" package. 
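+//
+// A minimal usage sketch (the standard log package is assumed for error
+// handling):
+//
+//	exe, err := osext.Executable()
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	log.Println("running from", exe)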
+package osext // import "github.com/kardianos/osext" + +import "path/filepath" + +var cx, ce = executableClean() + +func executableClean() (string, error) { + p, err := executable() + return filepath.Clean(p), err +} + +// Executable returns an absolute path that can be used to +// re-invoke the current program. +// It may not be valid after the current program exits. +func Executable() (string, error) { + return cx, ce +} + +// Returns same path as Executable, returns just the folder +// path. Excludes the executable name and any trailing slash. +func ExecutableFolder() (string, error) { + p, err := Executable() + if err != nil { + return "", err + } + + return filepath.Dir(p), nil +} diff --git a/vendor/github.com/kardianos/osext/osext_go18.go b/vendor/github.com/kardianos/osext/osext_go18.go new file mode 100644 index 0000000..009d8a9 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_go18.go @@ -0,0 +1,9 @@ +//+build go1.8,!openbsd + +package osext + +import "os" + +func executable() (string, error) { + return os.Executable() +} diff --git a/vendor/github.com/kardianos/osext/osext_plan9.go b/vendor/github.com/kardianos/osext/osext_plan9.go new file mode 100644 index 0000000..95e2371 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_plan9.go @@ -0,0 +1,22 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//+build !go1.8 + +package osext + +import ( + "os" + "strconv" + "syscall" +) + +func executable() (string, error) { + f, err := os.Open("/proc/" + strconv.Itoa(os.Getpid()) + "/text") + if err != nil { + return "", err + } + defer f.Close() + return syscall.Fd2path(int(f.Fd())) +} diff --git a/vendor/github.com/kardianos/osext/osext_procfs.go b/vendor/github.com/kardianos/osext/osext_procfs.go new file mode 100644 index 0000000..e1f16f8 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_procfs.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.8,android !go1.8,linux !go1.8,netbsd !go1.8,solaris !go1.8,dragonfly + +package osext + +import ( + "errors" + "fmt" + "os" + "runtime" + "strings" +) + +func executable() (string, error) { + switch runtime.GOOS { + case "linux", "android": + const deletedTag = " (deleted)" + execpath, err := os.Readlink("/proc/self/exe") + if err != nil { + return execpath, err + } + execpath = strings.TrimSuffix(execpath, deletedTag) + execpath = strings.TrimPrefix(execpath, deletedTag) + return execpath, nil + case "netbsd": + return os.Readlink("/proc/curproc/exe") + case "dragonfly": + return os.Readlink("/proc/curproc/file") + case "solaris": + return os.Readlink(fmt.Sprintf("/proc/%d/path/a.out", os.Getpid())) + } + return "", errors.New("ExecPath not implemented for " + runtime.GOOS) +} diff --git a/vendor/github.com/kardianos/osext/osext_sysctl.go b/vendor/github.com/kardianos/osext/osext_sysctl.go new file mode 100644 index 0000000..33cee25 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_sysctl.go @@ -0,0 +1,126 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
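+
+// This file resolves the executable path via sysctl: always on OpenBSD, and
+// on Darwin and FreeBSD when built with a Go toolchain older than 1.8.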
+ +// +build !go1.8,darwin !go1.8,freebsd openbsd + +package osext + +import ( + "os" + "os/exec" + "path/filepath" + "runtime" + "syscall" + "unsafe" +) + +var initCwd, initCwdErr = os.Getwd() + +func executable() (string, error) { + var mib [4]int32 + switch runtime.GOOS { + case "freebsd": + mib = [4]int32{1 /* CTL_KERN */, 14 /* KERN_PROC */, 12 /* KERN_PROC_PATHNAME */, -1} + case "darwin": + mib = [4]int32{1 /* CTL_KERN */, 38 /* KERN_PROCARGS */, int32(os.Getpid()), -1} + case "openbsd": + mib = [4]int32{1 /* CTL_KERN */, 55 /* KERN_PROC_ARGS */, int32(os.Getpid()), 1 /* KERN_PROC_ARGV */} + } + + n := uintptr(0) + // Get length. + _, _, errNum := syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, 0, uintptr(unsafe.Pointer(&n)), 0, 0) + if errNum != 0 { + return "", errNum + } + if n == 0 { // This shouldn't happen. + return "", nil + } + buf := make([]byte, n) + _, _, errNum = syscall.Syscall6(syscall.SYS___SYSCTL, uintptr(unsafe.Pointer(&mib[0])), 4, uintptr(unsafe.Pointer(&buf[0])), uintptr(unsafe.Pointer(&n)), 0, 0) + if errNum != 0 { + return "", errNum + } + if n == 0 { // This shouldn't happen. + return "", nil + } + + var execPath string + switch runtime.GOOS { + case "openbsd": + // buf now contains **argv, with pointers to each of the C-style + // NULL terminated arguments. + var args []string + argv := uintptr(unsafe.Pointer(&buf[0])) + Loop: + for { + argp := *(**[1 << 20]byte)(unsafe.Pointer(argv)) + if argp == nil { + break + } + for i := 0; uintptr(i) < n; i++ { + // we don't want the full arguments list + if string(argp[i]) == " " { + break Loop + } + if argp[i] != 0 { + continue + } + args = append(args, string(argp[:i])) + n -= uintptr(i) + break + } + if n < unsafe.Sizeof(argv) { + break + } + argv += unsafe.Sizeof(argv) + n -= unsafe.Sizeof(argv) + } + execPath = args[0] + // There is no canonical way to get an executable path on + // OpenBSD, so check PATH in case we are called directly + if execPath[0] != '/' && execPath[0] != '.' { + execIsInPath, err := exec.LookPath(execPath) + if err == nil { + execPath = execIsInPath + } + } + default: + for i, v := range buf { + if v == 0 { + buf = buf[:i] + break + } + } + execPath = string(buf) + } + + var err error + // execPath will not be empty due to above checks. + // Try to get the absolute path if the execPath is not rooted. + if execPath[0] != '/' { + execPath, err = getAbs(execPath) + if err != nil { + return execPath, err + } + } + // For darwin KERN_PROCARGS may return the path to a symlink rather than the + // actual executable. + if runtime.GOOS == "darwin" { + if execPath, err = filepath.EvalSymlinks(execPath); err != nil { + return execPath, err + } + } + return execPath, nil +} + +func getAbs(execPath string) (string, error) { + if initCwdErr != nil { + return execPath, initCwdErr + } + // The execPath may begin with a "../" or a "./" so clean it first. + // Join the two paths, trailing and starting slashes undetermined, so use + // the generic Join function. + return filepath.Join(initCwd, filepath.Clean(execPath)), nil +} diff --git a/vendor/github.com/kardianos/osext/osext_windows.go b/vendor/github.com/kardianos/osext/osext_windows.go new file mode 100644 index 0000000..074b3b3 --- /dev/null +++ b/vendor/github.com/kardianos/osext/osext_windows.go @@ -0,0 +1,36 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+
+//+build !go1.8
+
+package osext
+
+import (
+	"syscall"
+	"unicode/utf16"
+	"unsafe"
+)
+
+var (
+	kernel                = syscall.MustLoadDLL("kernel32.dll")
+	getModuleFileNameProc = kernel.MustFindProc("GetModuleFileNameW")
+)
+
+// GetModuleFileName() with hModule = NULL
+func executable() (exePath string, err error) {
+	return getModuleFileName()
+}
+
+func getModuleFileName() (string, error) {
+	var n uint32
+	b := make([]uint16, syscall.MAX_PATH)
+	size := uint32(len(b))
+
+	r0, _, e1 := getModuleFileNameProc.Call(0, uintptr(unsafe.Pointer(&b[0])), uintptr(size))
+	n = uint32(r0)
+	if n == 0 {
+		return "", e1
+	}
+	return string(utf16.Decode(b[0:n])), nil
+}
diff --git a/vendor/github.com/namsral/flag/extras.go b/vendor/github.com/namsral/flag/extras.go
new file mode 100644
index 0000000..f937899
--- /dev/null
+++ b/vendor/github.com/namsral/flag/extras.go
@@ -0,0 +1,185 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package flag
+
+import (
+	"bufio"
+	"os"
+	"strings"
+)
+
+// EnvironmentPrefix defines a string that will be implicitly prefixed to a
+// flag name before looking it up in the environment variables.
+var EnvironmentPrefix = ""
+
+// ParseEnv parses flags from environment variables.
+// Flags already set will be ignored.
+func (f *FlagSet) ParseEnv(environ []string) error {
+
+	m := f.formal
+
+	env := make(map[string]string)
+	for _, s := range environ {
+		i := strings.Index(s, "=")
+		if i < 1 {
+			continue
+		}
+		env[s[0:i]] = s[i+1:]
+	}
+
+	for _, flag := range m {
+		name := flag.Name
+		_, set := f.actual[name]
+		if set {
+			continue
+		}
+
+		flag, alreadythere := m[name]
+		if !alreadythere {
+			if name == "help" || name == "h" { // special case for nice help message.
+				f.usage()
+				return ErrHelp
+			}
+			return f.failf("environment variable provided but not defined: %s", name)
+		}
+
+		envKey := strings.ToUpper(flag.Name)
+		if f.envPrefix != "" {
+			envKey = f.envPrefix + "_" + envKey
+		}
+		envKey = strings.Replace(envKey, "-", "_", -1)
+
+		value, isSet := env[envKey]
+		if !isSet {
+			continue
+		}
+
+		hasValue := false
+		if len(value) > 0 {
+			hasValue = true
+		}
+
+		if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg
+			if hasValue {
+				if err := fv.Set(value); err != nil {
+					return f.failf("invalid boolean value %q for environment variable %s: %v", value, name, err)
+				}
+			} else {
+				// flag without value is regarded a bool
+				fv.Set("true")
+			}
+		} else {
+			if err := flag.Value.Set(value); err != nil {
+				return f.failf("invalid value %q for environment variable %s: %v", value, name, err)
+			}
+		}
+
+		// update f.actual
+		if f.actual == nil {
+			f.actual = make(map[string]*Flag)
+		}
+		f.actual[name] = flag
+
+	}
+	return nil
+}
+
+// NewFlagSetWithEnvPrefix returns a new empty flag set with the specified name,
+// environment variable prefix, and error handling property.
+func NewFlagSetWithEnvPrefix(name string, prefix string, errorHandling ErrorHandling) *FlagSet {
+	f := NewFlagSet(name, errorHandling)
+	f.envPrefix = prefix
+	return f
+}
+
+// DefaultConfigFlagname defines the flag name of the optional config file
+// path. Used to look up and parse the config file when a default is set and
+// available on disk.
+var DefaultConfigFlagname = "config"
+
+// ParseFile parses flags from the file in path.
+// Same format as command-line arguments; newlines and lines beginning with a
+// "#" character are ignored.
Flags already set will be ignored. +func (f *FlagSet) ParseFile(path string) error { + + // Extract arguments from file + fp, err := os.Open(path) + if err != nil { + return err + } + defer fp.Close() + + scanner := bufio.NewScanner(fp) + for scanner.Scan() { + line := scanner.Text() + + // Ignore empty lines + if len(line) == 0 { + continue + } + + // Ignore comments + if line[:1] == "#" { + continue + } + + // Match `key=value` and `key value` + var name, value string + hasValue := false + for i, v := range line { + if v == '=' || v == ' ' { + hasValue = true + name, value = line[:i], line[i+1:] + break + } + } + + if hasValue == false { + name = line + } + + // Ignore flag when already set; arguments have precedence over file + if f.actual[name] != nil { + continue + } + + m := f.formal + flag, alreadythere := m[name] + if !alreadythere { + if name == "help" || name == "h" { // special case for nice help message. + f.usage() + return ErrHelp + } + return f.failf("configuration variable provided but not defined: %s", name) + } + + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg + if hasValue { + if err := fv.Set(value); err != nil { + return f.failf("invalid boolean value %q for configuration variable %s: %v", value, name, err) + } + } else { + // flag without value is regarded a bool + fv.Set("true") + } + } else { + if err := flag.Value.Set(value); err != nil { + return f.failf("invalid value %q for configuration variable %s: %v", value, name, err) + } + } + + // update f.actual + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + } + + if err := scanner.Err(); err != nil { + return err + } + + return nil +} diff --git a/vendor/github.com/namsral/flag/flag.go b/vendor/github.com/namsral/flag/flag.go new file mode 100644 index 0000000..ac9d78a --- /dev/null +++ b/vendor/github.com/namsral/flag/flag.go @@ -0,0 +1,1015 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +/* + Package flag implements command-line flag parsing. + + Usage: + + Define flags using flag.String(), Bool(), Int(), etc. + + This declares an integer flag, -flagname, stored in the pointer ip, with type *int. + import "flag" + var ip = flag.Int("flagname", 1234, "help message for flagname") + If you like, you can bind the flag to a variable using the Var() functions. + var flagvar int + func init() { + flag.IntVar(&flagvar, "flagname", 1234, "help message for flagname") + } + Or you can create custom flags that satisfy the Value interface (with + pointer receivers) and couple them to flag parsing by + flag.Var(&flagVal, "name", "help message for flagname") + For such flags, the default value is just the initial value of the variable. + + After all flags are defined, call + flag.Parse() + to parse the command line into the defined flags. + + Flags may then be used directly. If you're using the flags themselves, + they are all pointers; if you bind to variables, they're values. + fmt.Println("ip has value ", *ip) + fmt.Println("flagvar has value ", flagvar) + + After parsing, the arguments following the flags are available as the + slice flag.Args() or individually as flag.Arg(i). + The arguments are indexed from 0 through flag.NArg()-1. + + Command line flag syntax: + -flag + -flag=x + -flag x // non-boolean flags only + One or two minus signs may be used; they are equivalent. 
+ The last form is not permitted for boolean flags because the + meaning of the command + cmd -x * + will change if there is a file called 0, false, etc. You must + use the -flag=false form to turn off a boolean flag. + + Flag parsing stops just before the first non-flag argument + ("-" is a non-flag argument) or after the terminator "--". + + Integer flags accept 1234, 0664, 0x1234 and may be negative. + Boolean flags may be: + 1, 0, t, f, T, F, true, false, TRUE, FALSE, True, False + Duration flags accept any input valid for time.ParseDuration. + + The default set of command-line flags is controlled by + top-level functions. The FlagSet type allows one to define + independent sets of flags, such as to implement subcommands + in a command-line interface. The methods of FlagSet are + analogous to the top-level functions for the command-line + flag set. +*/ +package flag + +import ( + "errors" + "fmt" + "io" + "os" + "reflect" + "sort" + "strconv" + "strings" + "time" +) + +// ErrHelp is the error returned if the -help or -h flag is invoked +// but no such flag is defined. +var ErrHelp = errors.New("flag: help requested") + +// -- bool Value +type boolValue bool + +func newBoolValue(val bool, p *bool) *boolValue { + *p = val + return (*boolValue)(p) +} + +func (b *boolValue) Set(s string) error { + v, err := strconv.ParseBool(s) + *b = boolValue(v) + return err +} + +func (b *boolValue) Get() interface{} { return bool(*b) } + +func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } + +func (b *boolValue) IsBoolFlag() bool { return true } + +// optional interface to indicate boolean flags that can be +// supplied without "=value" text +type boolFlag interface { + Value + IsBoolFlag() bool +} + +// -- int Value +type intValue int + +func newIntValue(val int, p *int) *intValue { + *p = val + return (*intValue)(p) +} + +func (i *intValue) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = intValue(v) + return err +} + +func (i *intValue) Get() interface{} { return int(*i) } + +func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- int64 Value +type int64Value int64 + +func newInt64Value(val int64, p *int64) *int64Value { + *p = val + return (*int64Value)(p) +} + +func (i *int64Value) Set(s string) error { + v, err := strconv.ParseInt(s, 0, 64) + *i = int64Value(v) + return err +} + +func (i *int64Value) Get() interface{} { return int64(*i) } + +func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint Value +type uintValue uint + +func newUintValue(val uint, p *uint) *uintValue { + *p = val + return (*uintValue)(p) +} + +func (i *uintValue) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uintValue(v) + return err +} + +func (i *uintValue) Get() interface{} { return uint(*i) } + +func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } + +// -- uint64 Value +type uint64Value uint64 + +func newUint64Value(val uint64, p *uint64) *uint64Value { + *p = val + return (*uint64Value)(p) +} + +func (i *uint64Value) Set(s string) error { + v, err := strconv.ParseUint(s, 0, 64) + *i = uint64Value(v) + return err +} + +func (i *uint64Value) Get() interface{} { return uint64(*i) } + +func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } + +// -- string Value +type stringValue string + +func newStringValue(val string, p *string) *stringValue { + *p = val + return (*stringValue)(p) +} + +func (s *stringValue) Set(val string) error { + *s = stringValue(val) + return nil +} + +func (s 
*stringValue) Get() interface{} { return string(*s) } + +func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } + +// -- float64 Value +type float64Value float64 + +func newFloat64Value(val float64, p *float64) *float64Value { + *p = val + return (*float64Value)(p) +} + +func (f *float64Value) Set(s string) error { + v, err := strconv.ParseFloat(s, 64) + *f = float64Value(v) + return err +} + +func (f *float64Value) Get() interface{} { return float64(*f) } + +func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } + +// -- time.Duration Value +type durationValue time.Duration + +func newDurationValue(val time.Duration, p *time.Duration) *durationValue { + *p = val + return (*durationValue)(p) +} + +func (d *durationValue) Set(s string) error { + v, err := time.ParseDuration(s) + *d = durationValue(v) + return err +} + +func (d *durationValue) Get() interface{} { return time.Duration(*d) } + +func (d *durationValue) String() string { return (*time.Duration)(d).String() } + +// Value is the interface to the dynamic value stored in a flag. +// (The default value is represented as a string.) +// +// If a Value has an IsBoolFlag() bool method returning true, +// the command-line parser makes -name equivalent to -name=true +// rather than using the next command-line argument. +// +// Set is called once, in command line order, for each flag present. +type Value interface { + String() string + Set(string) error +} + +// Getter is an interface that allows the contents of a Value to be retrieved. +// It wraps the Value interface, rather than being part of it, because it +// appeared after Go 1 and its compatibility rules. All Value types provided +// by this package satisfy the Getter interface. +type Getter interface { + Value + Get() interface{} +} + +// ErrorHandling defines how FlagSet.Parse behaves if the parse fails. +type ErrorHandling int + +// These constants cause FlagSet.Parse to behave as described if the parse fails. +const ( + ContinueOnError ErrorHandling = iota // Return a descriptive error. + ExitOnError // Call os.Exit(2). + PanicOnError // Call panic with a descriptive error. +) + +// A FlagSet represents a set of defined flags. The zero value of a FlagSet +// has no name and has ContinueOnError error handling. +type FlagSet struct { + // Usage is the function called when an error occurs while parsing flags. + // The field is a function (not a method) that may be changed to point to + // a custom error handler. + Usage func() + + name string + parsed bool + actual map[string]*Flag + formal map[string]*Flag + envPrefix string // prefix to all env variable names + args []string // arguments after flags + errorHandling ErrorHandling + output io.Writer // nil means stderr; use out() accessor +} + +// A Flag represents the state of a flag. +type Flag struct { + Name string // name as it appears on command line + Usage string // help message + Value Value // value as set + DefValue string // default value (as text); for usage message +} + +// sortFlags returns the flags as a slice in lexicographical sorted order. 
+func sortFlags(flags map[string]*Flag) []*Flag { + list := make(sort.StringSlice, len(flags)) + i := 0 + for _, f := range flags { + list[i] = f.Name + i++ + } + list.Sort() + result := make([]*Flag, len(list)) + for i, name := range list { + result[i] = flags[name] + } + return result +} + +func (f *FlagSet) out() io.Writer { + if f.output == nil { + return os.Stderr + } + return f.output +} + +// SetOutput sets the destination for usage and error messages. +// If output is nil, os.Stderr is used. +func (f *FlagSet) SetOutput(output io.Writer) { + f.output = output +} + +// VisitAll visits the flags in lexicographical order, calling fn for each. +// It visits all flags, even those not set. +func (f *FlagSet) VisitAll(fn func(*Flag)) { + for _, flag := range sortFlags(f.formal) { + fn(flag) + } +} + +// VisitAll visits the command-line flags in lexicographical order, calling +// fn for each. It visits all flags, even those not set. +func VisitAll(fn func(*Flag)) { + CommandLine.VisitAll(fn) +} + +// Visit visits the flags in lexicographical order, calling fn for each. +// It visits only those flags that have been set. +func (f *FlagSet) Visit(fn func(*Flag)) { + for _, flag := range sortFlags(f.actual) { + fn(flag) + } +} + +// Visit visits the command-line flags in lexicographical order, calling fn +// for each. It visits only those flags that have been set. +func Visit(fn func(*Flag)) { + CommandLine.Visit(fn) +} + +// Lookup returns the Flag structure of the named flag, returning nil if none exists. +func (f *FlagSet) Lookup(name string) *Flag { + return f.formal[name] +} + +// Lookup returns the Flag structure of the named command-line flag, +// returning nil if none exists. +func Lookup(name string) *Flag { + return CommandLine.formal[name] +} + +// Set sets the value of the named flag. +func (f *FlagSet) Set(name, value string) error { + flag, ok := f.formal[name] + if !ok { + return fmt.Errorf("no such flag -%v", name) + } + err := flag.Value.Set(value) + if err != nil { + return err + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return nil +} + +// Set sets the value of the named command-line flag. +func Set(name, value string) error { + return CommandLine.Set(name, value) +} + +// isZeroValue guesses whether the string represents the zero +// value for a flag. It is not accurate but in practice works OK. +func isZeroValue(flag *Flag, value string) bool { + // Build a zero value of the flag's Value type, and see if the + // result of calling its String method equals the value passed in. + // This works unless the Value type is itself an interface type. + typ := reflect.TypeOf(flag.Value) + var z reflect.Value + if typ.Kind() == reflect.Ptr { + z = reflect.New(typ.Elem()) + } else { + z = reflect.Zero(typ) + } + if value == z.Interface().(Value).String() { + return true + } + + switch value { + case "false": + return true + case "": + return true + case "0": + return true + } + return false +} + +// UnquoteUsage extracts a back-quoted name from the usage +// string for a flag and returns it and the un-quoted usage. +// Given "a `name` to show" it returns ("name", "a name to show"). +// If there are no back quotes, the name is an educated guess of the +// type of the flag's value, or the empty string if the flag is boolean. +func UnquoteUsage(flag *Flag) (name string, usage string) { + // Look for a back-quoted name, but avoid the strings package. 
+ usage = flag.Usage + for i := 0; i < len(usage); i++ { + if usage[i] == '`' { + for j := i + 1; j < len(usage); j++ { + if usage[j] == '`' { + name = usage[i+1 : j] + usage = usage[:i] + name + usage[j+1:] + return name, usage + } + } + break // Only one back quote; use type name. + } + } + // No explicit name, so use type if we can find one. + name = "value" + switch flag.Value.(type) { + case boolFlag: + name = "" + case *durationValue: + name = "duration" + case *float64Value: + name = "float" + case *intValue, *int64Value: + name = "int" + case *stringValue: + name = "string" + case *uintValue, *uint64Value: + name = "uint" + } + return +} + +// PrintDefaults prints to standard error the default values of all +// defined command-line flags in the set. See the documentation for +// the global function PrintDefaults for more information. +func (f *FlagSet) PrintDefaults() { + f.VisitAll(func(flag *Flag) { + s := fmt.Sprintf(" -%s", flag.Name) // Two spaces before -; see next two comments. + name, usage := UnquoteUsage(flag) + if len(name) > 0 { + s += " " + name + } + // Boolean flags of one ASCII letter are so common we + // treat them specially, putting their usage on the same line. + if len(s) <= 4 { // space, space, '-', 'x'. + s += "\t" + } else { + // Four spaces before the tab triggers good alignment + // for both 4- and 8-space tab stops. + s += "\n \t" + } + s += usage + if !isZeroValue(flag, flag.DefValue) { + if _, ok := flag.Value.(*stringValue); ok { + // put quotes on the value + s += fmt.Sprintf(" (default %q)", flag.DefValue) + } else { + s += fmt.Sprintf(" (default %v)", flag.DefValue) + } + } + fmt.Fprint(f.out(), s, "\n") + }) +} + +// PrintDefaults prints, to standard error unless configured otherwise, +// a usage message showing the default settings of all defined +// command-line flags. +// For an integer valued flag x, the default output has the form +// -x int +// usage-message-for-x (default 7) +// The usage message will appear on a separate line for anything but +// a bool flag with a one-byte name. For bool flags, the type is +// omitted and if the flag name is one byte the usage message appears +// on the same line. The parenthetical default is omitted if the +// default is the zero value for the type. The listed type, here int, +// can be changed by placing a back-quoted name in the flag's usage +// string; the first such item in the message is taken to be a parameter +// name to show in the message and the back quotes are stripped from +// the message when displayed. For instance, given +// flag.String("I", "", "search `directory` for include files") +// the output will be +// -I directory +// search directory for include files. +func PrintDefaults() { + CommandLine.PrintDefaults() +} + +// defaultUsage is the default function to print a usage message. +func defaultUsage(f *FlagSet) { + if f.name == "" { + fmt.Fprintf(f.out(), "Usage:\n") + } else { + fmt.Fprintf(f.out(), "Usage of %s:\n", f.name) + } + f.PrintDefaults() +} + +// NOTE: Usage is not just defaultUsage(CommandLine) +// because it serves (via godoc flag Usage) as the example +// for how to write your own usage function. + +// Usage prints to standard error a usage message documenting all defined command-line flags. +// It is called when an error occurs while parsing flags. +// The function is a variable that may be changed to point to a custom function. 
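+//
+// For example, a program can install its own usage function before calling
+// Parse (a sketch; the message text is illustrative):
+//
+//	flag.Usage = func() {
+//		fmt.Fprintf(os.Stderr, "usage: %s [options] <file>\n", os.Args[0])
+//		flag.PrintDefaults()
+//	}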
+// By default it prints a simple header and calls PrintDefaults; for details about the +// format of the output and how to control it, see the documentation for PrintDefaults. +var Usage = func() { + fmt.Fprintf(os.Stderr, "Usage of %s:\n", os.Args[0]) + PrintDefaults() +} + +// NFlag returns the number of flags that have been set. +func (f *FlagSet) NFlag() int { return len(f.actual) } + +// NFlag returns the number of command-line flags that have been set. +func NFlag() int { return len(CommandLine.actual) } + +// Arg returns the i'th argument. Arg(0) is the first remaining argument +// after flags have been processed. Arg returns an empty string if the +// requested element does not exist. +func (f *FlagSet) Arg(i int) string { + if i < 0 || i >= len(f.args) { + return "" + } + return f.args[i] +} + +// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument +// after flags have been processed. Arg returns an empty string if the +// requested element does not exist. +func Arg(i int) string { + return CommandLine.Arg(i) +} + +// NArg is the number of arguments remaining after flags have been processed. +func (f *FlagSet) NArg() int { return len(f.args) } + +// NArg is the number of arguments remaining after flags have been processed. +func NArg() int { return len(CommandLine.args) } + +// Args returns the non-flag arguments. +func (f *FlagSet) Args() []string { return f.args } + +// Args returns the non-flag command-line arguments. +func Args() []string { return CommandLine.args } + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) { + f.Var(newBoolValue(value, p), name, usage) +} + +// BoolVar defines a bool flag with specified name, default value, and usage string. +// The argument p points to a bool variable in which to store the value of the flag. +func BoolVar(p *bool, name string, value bool, usage string) { + CommandLine.Var(newBoolValue(value, p), name, usage) +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func (f *FlagSet) Bool(name string, value bool, usage string) *bool { + p := new(bool) + f.BoolVar(p, name, value, usage) + return p +} + +// Bool defines a bool flag with specified name, default value, and usage string. +// The return value is the address of a bool variable that stores the value of the flag. +func Bool(name string, value bool, usage string) *bool { + return CommandLine.Bool(name, value, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func (f *FlagSet) IntVar(p *int, name string, value int, usage string) { + f.Var(newIntValue(value, p), name, usage) +} + +// IntVar defines an int flag with specified name, default value, and usage string. +// The argument p points to an int variable in which to store the value of the flag. +func IntVar(p *int, name string, value int, usage string) { + CommandLine.Var(newIntValue(value, p), name, usage) +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. 
+func (f *FlagSet) Int(name string, value int, usage string) *int { + p := new(int) + f.IntVar(p, name, value, usage) + return p +} + +// Int defines an int flag with specified name, default value, and usage string. +// The return value is the address of an int variable that stores the value of the flag. +func Int(name string, value int, usage string) *int { + return CommandLine.Int(name, value, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func (f *FlagSet) Int64Var(p *int64, name string, value int64, usage string) { + f.Var(newInt64Value(value, p), name, usage) +} + +// Int64Var defines an int64 flag with specified name, default value, and usage string. +// The argument p points to an int64 variable in which to store the value of the flag. +func Int64Var(p *int64, name string, value int64, usage string) { + CommandLine.Var(newInt64Value(value, p), name, usage) +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func (f *FlagSet) Int64(name string, value int64, usage string) *int64 { + p := new(int64) + f.Int64Var(p, name, value, usage) + return p +} + +// Int64 defines an int64 flag with specified name, default value, and usage string. +// The return value is the address of an int64 variable that stores the value of the flag. +func Int64(name string, value int64, usage string) *int64 { + return CommandLine.Int64(name, value, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func (f *FlagSet) UintVar(p *uint, name string, value uint, usage string) { + f.Var(newUintValue(value, p), name, usage) +} + +// UintVar defines a uint flag with specified name, default value, and usage string. +// The argument p points to a uint variable in which to store the value of the flag. +func UintVar(p *uint, name string, value uint, usage string) { + CommandLine.Var(newUintValue(value, p), name, usage) +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func (f *FlagSet) Uint(name string, value uint, usage string) *uint { + p := new(uint) + f.UintVar(p, name, value, usage) + return p +} + +// Uint defines a uint flag with specified name, default value, and usage string. +// The return value is the address of a uint variable that stores the value of the flag. +func Uint(name string, value uint, usage string) *uint { + return CommandLine.Uint(name, value, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func (f *FlagSet) Uint64Var(p *uint64, name string, value uint64, usage string) { + f.Var(newUint64Value(value, p), name, usage) +} + +// Uint64Var defines a uint64 flag with specified name, default value, and usage string. +// The argument p points to a uint64 variable in which to store the value of the flag. +func Uint64Var(p *uint64, name string, value uint64, usage string) { + CommandLine.Var(newUint64Value(value, p), name, usage) +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. 
+// The return value is the address of a uint64 variable that stores the value of the flag. +func (f *FlagSet) Uint64(name string, value uint64, usage string) *uint64 { + p := new(uint64) + f.Uint64Var(p, name, value, usage) + return p +} + +// Uint64 defines a uint64 flag with specified name, default value, and usage string. +// The return value is the address of a uint64 variable that stores the value of the flag. +func Uint64(name string, value uint64, usage string) *uint64 { + return CommandLine.Uint64(name, value, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func (f *FlagSet) StringVar(p *string, name string, value string, usage string) { + f.Var(newStringValue(value, p), name, usage) +} + +// StringVar defines a string flag with specified name, default value, and usage string. +// The argument p points to a string variable in which to store the value of the flag. +func StringVar(p *string, name string, value string, usage string) { + CommandLine.Var(newStringValue(value, p), name, usage) +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func (f *FlagSet) String(name string, value string, usage string) *string { + p := new(string) + f.StringVar(p, name, value, usage) + return p +} + +// String defines a string flag with specified name, default value, and usage string. +// The return value is the address of a string variable that stores the value of the flag. +func String(name string, value string, usage string) *string { + return CommandLine.String(name, value, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func (f *FlagSet) Float64Var(p *float64, name string, value float64, usage string) { + f.Var(newFloat64Value(value, p), name, usage) +} + +// Float64Var defines a float64 flag with specified name, default value, and usage string. +// The argument p points to a float64 variable in which to store the value of the flag. +func Float64Var(p *float64, name string, value float64, usage string) { + CommandLine.Var(newFloat64Value(value, p), name, usage) +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func (f *FlagSet) Float64(name string, value float64, usage string) *float64 { + p := new(float64) + f.Float64Var(p, name, value, usage) + return p +} + +// Float64 defines a float64 flag with specified name, default value, and usage string. +// The return value is the address of a float64 variable that stores the value of the flag. +func Float64(name string, value float64, usage string) *float64 { + return CommandLine.Float64(name, value, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. 
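+// For example, passing -timeout=1m30s on the command line (an illustrative
+// flag name) stores 90 seconds in the duration variable.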
+func (f *FlagSet) DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + f.Var(newDurationValue(value, p), name, usage) +} + +// DurationVar defines a time.Duration flag with specified name, default value, and usage string. +// The argument p points to a time.Duration variable in which to store the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. +func DurationVar(p *time.Duration, name string, value time.Duration, usage string) { + CommandLine.Var(newDurationValue(value, p), name, usage) +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. +func (f *FlagSet) Duration(name string, value time.Duration, usage string) *time.Duration { + p := new(time.Duration) + f.DurationVar(p, name, value, usage) + return p +} + +// Duration defines a time.Duration flag with specified name, default value, and usage string. +// The return value is the address of a time.Duration variable that stores the value of the flag. +// The flag accepts a value acceptable to time.ParseDuration. +func Duration(name string, value time.Duration, usage string) *time.Duration { + return CommandLine.Duration(name, value, usage) +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func (f *FlagSet) Var(value Value, name string, usage string) { + // Remember the default value as a string; it won't change. + flag := &Flag{name, usage, value, value.String()} + _, alreadythere := f.formal[name] + if alreadythere { + var msg string + if f.name == "" { + msg = fmt.Sprintf("flag redefined: %s", name) + } else { + msg = fmt.Sprintf("%s flag redefined: %s", f.name, name) + } + fmt.Fprintln(f.out(), msg) + panic(msg) // Happens only if flags are declared with identical names + } + if f.formal == nil { + f.formal = make(map[string]*Flag) + } + f.formal[name] = flag +} + +// Var defines a flag with the specified name and usage string. The type and +// value of the flag are represented by the first argument, of type Value, which +// typically holds a user-defined implementation of Value. For instance, the +// caller could create a flag that turns a comma-separated string into a slice +// of strings by giving the slice the methods of Value; in particular, Set would +// decompose the comma-separated string into the slice. +func Var(value Value, name string, usage string) { + CommandLine.Var(value, name, usage) +} + +// failf prints to standard error a formatted error and usage message and +// returns the error. +func (f *FlagSet) failf(format string, a ...interface{}) error { + err := fmt.Errorf(format, a...) + fmt.Fprintln(f.out(), err) + f.usage() + return err +} + +// usage calls the Usage method for the flag set if one is specified, +// or the appropriate default usage function otherwise. +func (f *FlagSet) usage() { + if f.Usage == nil { + if f == CommandLine { + Usage() + } else { + defaultUsage(f) + } + } else { + f.Usage() + } +} + +// parseOne parses one flag. 
It reports whether a flag was seen. +func (f *FlagSet) parseOne() (bool, error) { + if len(f.args) == 0 { + return false, nil + } + s := f.args[0] + if len(s) == 0 || s[0] != '-' || len(s) == 1 { + return false, nil + } + numMinuses := 1 + if s[1] == '-' { + numMinuses++ + if len(s) == 2 { // "--" terminates the flags + f.args = f.args[1:] + return false, nil + } + } + name := s[numMinuses:] + if len(name) == 0 || name[0] == '-' || name[0] == '=' { + return false, f.failf("bad flag syntax: %s", s) + } + + // ignore go test flags + if strings.HasPrefix(name, "test.") { + return false, nil + } + + // it's a flag. does it have an argument? + f.args = f.args[1:] + hasValue := false + value := "" + for i := 1; i < len(name); i++ { // equals cannot be first + if name[i] == '=' { + value = name[i+1:] + hasValue = true + name = name[0:i] + break + } + } + m := f.formal + flag, alreadythere := m[name] // BUG + if !alreadythere { + if name == "help" || name == "h" { // special case for nice help message. + f.usage() + return false, ErrHelp + } + return false, f.failf("flag provided but not defined: -%s", name) + } + if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg + if hasValue { + if err := fv.Set(value); err != nil { + return false, f.failf("invalid boolean value %q for -%s: %v", value, name, err) + } + } else { + if err := fv.Set("true"); err != nil { + return false, f.failf("invalid boolean flag %s: %v", name, err) + } + } + } else { + // It must have a value, which might be the next argument. + if !hasValue && len(f.args) > 0 { + // value is the next arg + hasValue = true + value, f.args = f.args[0], f.args[1:] + } + if !hasValue { + return false, f.failf("flag needs an argument: -%s", name) + } + if err := flag.Value.Set(value); err != nil { + return false, f.failf("invalid value %q for flag -%s: %v", value, name, err) + } + } + if f.actual == nil { + f.actual = make(map[string]*Flag) + } + f.actual[name] = flag + return true, nil +} + +// Parse parses flag definitions from the argument list, which should not +// include the command name. Must be called after all flags in the FlagSet +// are defined and before flags are accessed by the program. +// The return value will be ErrHelp if -help or -h were set but not defined. +func (f *FlagSet) Parse(arguments []string) error { + f.parsed = true + f.args = arguments + for { + seen, err := f.parseOne() + if seen { + continue + } + if err == nil { + break + } + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + } + + // Parse environment variables + if err := f.ParseEnv(os.Environ()); err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + return err + } + + // Parse configuration from file + var cFile string + if cf := f.formal[DefaultConfigFlagname]; cf != nil { + cFile = cf.Value.String() + } + if cf := f.actual[DefaultConfigFlagname]; cf != nil { + cFile = cf.Value.String() + } + if cFile != "" { + if err := f.ParseFile(cFile); err != nil { + switch f.errorHandling { + case ContinueOnError: + return err + case ExitOnError: + os.Exit(2) + case PanicOnError: + panic(err) + } + return err + } + } + + return nil +} + +// Parsed reports whether f.Parse has been called. +func (f *FlagSet) Parsed() bool { + return f.parsed +} + +// Parse parses the command-line flags from os.Args[1:]. 
Must be called +// after all flags are defined and before flags are accessed by the program. +func Parse() { + // Ignore errors; CommandLine is set for ExitOnError. + CommandLine.Parse(os.Args[1:]) +} + +// Parsed reports whether the command-line flags have been parsed. +func Parsed() bool { + return CommandLine.Parsed() +} + +// CommandLine is the default set of command-line flags, parsed from os.Args. +// The top-level functions such as BoolVar, Arg, and so on are wrappers for the +// methods of CommandLine. +var CommandLine = NewFlagSet(os.Args[0], ExitOnError) + +// NewFlagSet returns a new, empty flag set with the specified name and +// error handling property. +func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { + f := &FlagSet{ + name: name, + errorHandling: errorHandling, + } + return f +} + +// Init sets the name and error handling property for a flag set. +// By default, the zero FlagSet uses an empty name, EnvironmentPrefix, and the +// ContinueOnError error handling policy. +func (f *FlagSet) Init(name string, errorHandling ErrorHandling) { + f.name = name + f.envPrefix = EnvironmentPrefix + f.errorHandling = errorHandling +} diff --git a/vendor/github.com/nishanths/go-xkcd/client.go b/vendor/github.com/nishanths/go-xkcd/client.go new file mode 100644 index 0000000..cea86be --- /dev/null +++ b/vendor/github.com/nishanths/go-xkcd/client.go @@ -0,0 +1,62 @@ +package xkcd + +import ( + "io" + "math/rand" + "net/http" + "time" +) + +func init() { + random = rand.New( + &lockedRandSource{ + src: rand.NewSource(time.Now().UnixNano()), + }, + ) +} + +// Client represents the HTTP client +// and any settings used to make requests +// to the xkcd API. +type Client struct { + HTTPClient *http.Client + Config +} + +// NewClient returns a Client configured with sane default +// values. +func NewClient() *Client { + return &Client{ + http.DefaultClient, + Config{ + UseHTTPS: true, + }, + } +} + +func (c *Client) baseURL() string { + protocol := "http://" + + if c.UseHTTPS { + protocol = "https://" + } + + return protocol + "xkcd.com" +} + +// do performs a http request. If there is no error, the caller is responsible +// for closing the returned response body. +func (c *Client) do(req *http.Request) (io.ReadCloser, error) { + res, err := c.HTTPClient.Do(req) + + if err != nil { + return nil, err + } + + if res.StatusCode != 200 { + res.Body.Close() + return nil, newStatusError(res.StatusCode) + } + + return res.Body, nil +} diff --git a/vendor/github.com/nishanths/go-xkcd/comics.go b/vendor/github.com/nishanths/go-xkcd/comics.go new file mode 100644 index 0000000..fc8e4ec --- /dev/null +++ b/vendor/github.com/nishanths/go-xkcd/comics.go @@ -0,0 +1,153 @@ +package xkcd + +import ( + "encoding/json" + "fmt" + "net/http" + "strconv" + "time" +) + +type comicResponse struct { + Alt string `json:"alt"` + Day string `json:"day"` + ImageURL string `json:"img"` + URL string `json:"link"` + Month string `json:"month"` + News string `json:"news"` + Number int `json:"num"` + SafeTitle string `json:"safe_title"` + Title string `json:"title"` + Transcript string `json:"transcript"` + Year string `json:"year"` +} + +// Comic represents information and metadata +// about a single xkcd comic. 
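+// The PublishDate field is assembled from the API's string Year, Month and
+// Day fields (see UnmarshalJSON below); for example Year "2017", Month "8"
+// and Day "25" decode to a PublishDate of 2017-08-25 UTC.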
+type Comic struct {
+	Alt         string
+	PublishDate time.Time
+	ImageURL    string
+	URL         string
+	News        string
+	Number      int
+	SafeTitle   string
+	Title       string
+	Transcript  string
+}
+
+// UnmarshalJSON unmarshals the response from the xkcd endpoint.
+func (comic *Comic) UnmarshalJSON(data []byte) error {
+	var aux comicResponse
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return err
+	}
+
+	date, err := time.Parse("2006/1/2", aux.Year+"/"+aux.Month+"/"+aux.Day)
+	if err != nil {
+		return err
+	}
+
+	comic.Alt = aux.Alt
+	comic.PublishDate = date
+	comic.ImageURL = aux.ImageURL
+	comic.URL = aux.URL
+	comic.Number = aux.Number
+	comic.News = aux.News
+	comic.SafeTitle = aux.SafeTitle
+	comic.Title = aux.Title
+	comic.Transcript = aux.Transcript
+
+	return nil
+}
+
+func (c *Client) doComicRequest(path string) (Comic, error) {
+	var comic Comic
+
+	req, err := http.NewRequest("GET", c.baseURL()+path, nil)
+	if err != nil {
+		return comic, err
+	}
+
+	body, err := c.do(req)
+	if err != nil {
+		return comic, err
+	}
+	defer body.Close()
+
+	if err := json.NewDecoder(body).Decode(&comic); err != nil {
+		return comic, err
+	}
+	return comic, nil
+}
+
+// Latest returns the latest comic's information.
+func (c *Client) Latest() (Comic, error) {
+	return c.doComicRequest("/info.0.json")
+}
+
+// Get returns the comic for the specified number.
+// The number is the comic number in the xkcd.com url.
+// For example: https://xkcd.com/193.
+func (c *Client) Get(number int) (Comic, error) {
+	numStr := strconv.Itoa(number)
+	return c.doComicRequest("/" + numStr + "/info.0.json")
+}
+
+func (c *Client) getLatestComicNumber() (int, error) {
+	comic, err := c.Latest()
+	if err != nil {
+		return 0, err
+	}
+	return comic.Number, nil
+}
+
+// Random returns a random comic.
+// The underlying random number generator's behavior may not match
+// the behavior of the Random button on xkcd.com.
+// Random never performs a request for a non-existent comic number.
+//
+// Also see: RandomInRange()
+func (c *Client) Random() (Comic, error) {
+	return c.RandomInRange(-1, -1, -1)
+}
+
+// RandomInRange returns a random comic using the given options.
+// The underlying random number generator's behavior may not match
+// the behavior of the Random button on xkcd.com.
+//
+// [begin, end) specify the range that the randomly chosen comic number
+// can be in. If begin equals -1, begin defaults to
+// the number of the first comic. Likewise, if end equals
+// -1, end defaults to the number of the latest comic + 1.
+//
+// latest specifies the number of the latest xkcd comic. Specifying the
+// number eliminates the overhead of performing an additional HTTP request
+// to find this number. Pass in -1 to let RandomInRange() find the latest
+// comic number by performing the additional request.
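+//
+// A minimal usage sketch, assuming a reachable xkcd API:
+//
+//	client := xkcd.NewClient()
+//	comic, err := client.RandomInRange(1, 500, -1) // a comic in [1, 500)
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	fmt.Println(comic.Number, comic.Title)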
+func (c *Client) RandomInRange(begin, end, latest int) (comic Comic, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%s", r) + } + }() + + if begin == -1 { + begin = 1 // 1 is the first xkcd comic + } + + if latest == -1 { + latest, err = c.getLatestComicNumber() + if err != nil { + return + } + } + + if end == -1 { + end = latest + 1 + } + + number := randomInt(begin, end) + comic, err = c.Get(number) + return +} diff --git a/vendor/github.com/nishanths/go-xkcd/config.go b/vendor/github.com/nishanths/go-xkcd/config.go new file mode 100644 index 0000000..14024d0 --- /dev/null +++ b/vendor/github.com/nishanths/go-xkcd/config.go @@ -0,0 +1,7 @@ +package xkcd + +// Config represents the settings that a Client will use +// when making requests. +type Config struct { + UseHTTPS bool +} diff --git a/vendor/github.com/nishanths/go-xkcd/doc.go b/vendor/github.com/nishanths/go-xkcd/doc.go new file mode 100644 index 0000000..e31f2da --- /dev/null +++ b/vendor/github.com/nishanths/go-xkcd/doc.go @@ -0,0 +1,21 @@ +/* +package xkcd provides methods to make requests to the xkcd.com API. + +Example: + + client := xkcd.NewClient() + comic, err := client.Latest() + + if err != nil { + log.Fatal(err) + } + + fmt.Printf("%+v", comic) + + +All functions on Client are safe to use concurrently. The Random() and +RandomInRange() functions do not modify the global random number generator. + +More details on the xkcd API can be found at https://xkcd.com/json.html. +*/ +package xkcd diff --git a/vendor/github.com/nishanths/go-xkcd/errors.go b/vendor/github.com/nishanths/go-xkcd/errors.go new file mode 100644 index 0000000..ba3c1b4 --- /dev/null +++ b/vendor/github.com/nishanths/go-xkcd/errors.go @@ -0,0 +1,25 @@ +package xkcd + +import ( + "fmt" + "net/http" +) + +// StatusError specifies the status code and status text +// for error responses from the xkcd API endpoint. +type StatusError struct { + StatusCode int + StatusText string +} + +func newStatusError(code int) StatusError { + return StatusError{ + StatusCode: code, + StatusText: http.StatusText(code), + } +} + +// Error returns a string representation of the StatusError. +func (e StatusError) Error() string { + return fmt.Sprintf("%d: %s", e.StatusCode, e.StatusText) +} diff --git a/vendor/github.com/nishanths/go-xkcd/random.go b/vendor/github.com/nishanths/go-xkcd/random.go new file mode 100644 index 0000000..c4c074a --- /dev/null +++ b/vendor/github.com/nishanths/go-xkcd/random.go @@ -0,0 +1,32 @@ +package xkcd + +import ( + "math/rand" + "sync" +) + +var random *rand.Rand + +type lockedRandSource struct { + lock sync.Mutex + src rand.Source +} + +// to satisfy rand.Source interface +func (r *lockedRandSource) Int63() int64 { + r.lock.Lock() + ret := r.src.Int63() + r.lock.Unlock() + return ret +} + +// to satisfy rand.Source interface +func (r *lockedRandSource) Seed(seed int64) { + r.lock.Lock() + r.src.Seed(seed) + r.lock.Unlock() +} + +func randomInt(begin, end int) int { + return random.Intn(end-begin) + begin +} diff --git a/vendor/github.com/opentracing-contrib/go-observer/observer.go b/vendor/github.com/opentracing-contrib/go-observer/observer.go new file mode 100644 index 0000000..c8cbf61 --- /dev/null +++ b/vendor/github.com/opentracing-contrib/go-observer/observer.go @@ -0,0 +1,39 @@ +// This project is licensed under the Apache License 2.0, see LICENSE. 
+
+package otobserver
+
+import opentracing "github.com/opentracing/opentracing-go"
+
+// Observer can be registered with a Tracer to receive notifications
+// about new Spans. Tracers are not required to support the Observer API.
+// The actual registration depends on the implementation, which might look
+// like this:
+//
+//	observer := myobserver.NewObserver()
+//	tracer := client.NewTracer(..., client.WithObserver(observer))
+//
type Observer interface {
+	// Create and return a span observer. Called when a span starts.
+	// If the Observer is not interested in the given span, it must return (nil, false).
+	// For example:
+	//
+	//	func StartSpan(opName string, opts ...opentracing.StartSpanOption) {
+	//		var sp opentracing.Span
+	//		sso := opentracing.StartSpanOptions{}
+	//		spanObserver, ok := observer.OnStartSpan(span, opName, sso)
+	//		if ok {
+	//			// we have a valid SpanObserver
+	//		}
+	//		...
+	//	}
+	OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (SpanObserver, bool)
+}
+
+// SpanObserver is created by the Observer and receives notifications about
+// other Span events.
+type SpanObserver interface {
+	// Callback called from opentracing.Span.SetOperationName()
+	OnSetOperationName(operationName string)
+	// Callback called from opentracing.Span.SetTag()
+	OnSetTag(key string, value interface{})
+	// Callback called from opentracing.Span.Finish()
+	OnFinish(options opentracing.FinishOptions)
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
new file mode 100644
index 0000000..8800129
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/ext/tags.go
@@ -0,0 +1,210 @@
+package ext
+
+import opentracing "github.com/opentracing/opentracing-go"
+
+// These constants define common tag names recommended for better portability across
+// tracing systems and languages/platforms.
+//
+// The tag names are defined as typed strings, so that in addition to the usual use
+//
+//	span.setTag(TagName, value)
+//
+// they also support value type validation via this additional syntax:
+//
+//	TagName.Set(span, value)
+//
+var (
+	//////////////////////////////////////////////////////////////////////
+	// SpanKind (client/server or producer/consumer)
+	//////////////////////////////////////////////////////////////////////
+
+	// SpanKind hints at the relationship between spans, e.g.
+	// client/server
+	SpanKind = spanKindTagName("span.kind")
+
+	// SpanKindRPCClient marks a span representing the client-side of an RPC
+	// or other remote call
+	SpanKindRPCClientEnum = SpanKindEnum("client")
+	SpanKindRPCClient     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum}
+
+	// SpanKindRPCServer marks a span representing the server-side of an RPC
+	// or other remote call
+	SpanKindRPCServerEnum = SpanKindEnum("server")
+	SpanKindRPCServer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum}
+
+	// SpanKindProducer marks a span representing the producer-side of a
+	// message bus
+	SpanKindProducerEnum = SpanKindEnum("producer")
+	SpanKindProducer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum}
+
+	// SpanKindConsumer marks a span representing the consumer-side of a
+	// message bus
+	SpanKindConsumerEnum = SpanKindEnum("consumer")
+	SpanKindConsumer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum}
+
+	//////////////////////////////////////////////////////////////////////
+	// Component name
+	//////////////////////////////////////////////////////////////////////
+
+	// Component is a low-cardinality identifier of the module, library,
+	// or package that is generating a span.
+	Component = stringTagName("component")
+
+	//////////////////////////////////////////////////////////////////////
+	// Sampling hint
+	//////////////////////////////////////////////////////////////////////
+
+	// SamplingPriority determines the priority of sampling this Span.
+	SamplingPriority = uint16TagName("sampling.priority")
+
+	//////////////////////////////////////////////////////////////////////
+	// Peer tags. These tags can be emitted by either the client side or
+	// the server side to describe the other side/service of a peer-to-peer
+	// communication, like an RPC call.
+	//////////////////////////////////////////////////////////////////////
+
+	// PeerService records the service name of the peer.
+	PeerService = stringTagName("peer.service")
+
+	// PeerAddress records the address of the peer. This may be an "ip:port",
+	// a bare "hostname", a FQDN, or even a database DSN substring
+	// like "mysql://username@127.0.0.1:3306/dbname"
+	PeerAddress = stringTagName("peer.address")
+
+	// PeerHostname records the host name of the peer
+	PeerHostname = stringTagName("peer.hostname")
+
+	// PeerHostIPv4 records IP v4 host address of the peer
+	PeerHostIPv4 = ipv4Tag("peer.ipv4")
+
+	// PeerHostIPv6 records IP v6 host address of the peer
+	PeerHostIPv6 = stringTagName("peer.ipv6")
+
+	// PeerPort records port number of the peer
+	PeerPort = uint16TagName("peer.port")
+
+	//////////////////////////////////////////////////////////////////////
+	// HTTP Tags
+	//////////////////////////////////////////////////////////////////////
+
+	// HTTPUrl should be the URL of the request being handled in this segment
+	// of the trace, in standard URI format. The protocol is optional.
+	HTTPUrl = stringTagName("http.url")
+
+	// HTTPMethod is the HTTP method of the request, and is case-insensitive.
+	HTTPMethod = stringTagName("http.method")
+
+	// HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the
+	// HTTP response.
+	HTTPStatusCode = uint16TagName("http.status_code")
+
+	//////////////////////////////////////////////////////////////////////
+	// DB Tags
+	//////////////////////////////////////////////////////////////////////
+
+	// DBInstance is the database instance name.
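+	// For example, with a connection string like
+	// "mysql://127.0.0.1:3306/customers", the instance name would be
+	// "customers" (an illustrative example; exact semantics are
+	// tracer-specific).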
+	DBInstance = stringTagName("db.instance")
+
+	// DBStatement is a database statement for the given database type.
+	// It can be a query or a prepared statement (i.e., before substitution).
+	DBStatement = stringTagName("db.statement")
+
+	// DBType is a database type. For any SQL database, "sql".
+	// For others, the lower-case database category, e.g. "redis"
+	DBType = stringTagName("db.type")
+
+	// DBUser is the username for accessing the database.
+	DBUser = stringTagName("db.user")
+
+	//////////////////////////////////////////////////////////////////////
+	// Message Bus Tag
+	//////////////////////////////////////////////////////////////////////
+
+	// MessageBusDestination is an address at which messages can be exchanged
+	MessageBusDestination = stringTagName("message_bus.destination")
+
+	//////////////////////////////////////////////////////////////////////
+	// Error Tag
+	//////////////////////////////////////////////////////////////////////
+
+	// Error indicates that the operation represented by the span resulted in
+	// an error.
+	Error = boolTagName("error")
+)
+
+// ---
+
+// SpanKindEnum represents common span types
+type SpanKindEnum string
+
+type spanKindTagName string
+
+// Set adds a string tag to the `span`
+func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) {
+	span.SetTag(string(tag), value)
+}
+
+type rpcServerOption struct {
+	clientContext opentracing.SpanContext
+}
+
+func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) {
+	if r.clientContext != nil {
+		opentracing.ChildOf(r.clientContext).Apply(o)
+	}
+	SpanKindRPCServer.Apply(o)
+}
+
+// RPCServerOption returns a StartSpanOption appropriate for an RPC server span
+// with `client` representing the metadata for the remote peer Span if available.
+// In case client == nil, due to the client not being instrumented, this RPC
+// server span will be a root span.
+func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption {
+	return rpcServerOption{client}
+}
+
+// ---
+
+type stringTagName string
+
+// Set adds a string tag to the `span`
+func (tag stringTagName) Set(span opentracing.Span, value string) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+type uint32TagName string
+
+// Set adds a uint32 tag to the `span`
+func (tag uint32TagName) Set(span opentracing.Span, value uint32) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+type uint16TagName string
+
+// Set adds a uint16 tag to the `span`
+func (tag uint16TagName) Set(span opentracing.Span, value uint16) {
+	span.SetTag(string(tag), value)
+}
+
+// ---
+
+type boolTagName string
+
+// Set adds a bool tag to the `span`
+func (tag boolTagName) Set(span opentracing.Span, value bool) {
+	span.SetTag(string(tag), value)
+}
+
+type ipv4Tag string
+
+// Set adds the IP v4 host address of the peer as a uint32 value to the `span`;
+// kept in this form for backward and Zipkin compatibility.
+func (tag ipv4Tag) Set(span opentracing.Span, value uint32) {
+	span.SetTag(string(tag), value)
+}
+
+// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`.
E.g., "127.0.0.1" +func (tag ipv4Tag) SetString(span opentracing.Span, value string) { + span.SetTag(string(tag), value) +} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go new file mode 100644 index 0000000..8c8e793 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -0,0 +1,32 @@ +package opentracing + +var ( + globalTracer Tracer = NoopTracer{} +) + +// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by +// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an +// opentracing.Tracer instance) should call SetGlobalTracer as early as +// possible in main(), prior to calling the `StartSpan` global func below. +// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` +// (etc) globals are noops. +func SetGlobalTracer(tracer Tracer) { + globalTracer = tracer +} + +// GlobalTracer returns the global singleton `Tracer` implementation. +// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop +// implementation that drops all data handed to it. +func GlobalTracer() Tracer { + return globalTracer +} + +// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. +func StartSpan(operationName string, opts ...StartSpanOption) Span { + return globalTracer.StartSpan(operationName, opts...) +} + +// InitGlobalTracer is deprecated. Please use SetGlobalTracer. +func InitGlobalTracer(tracer Tracer) { + SetGlobalTracer(tracer) +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go new file mode 100644 index 0000000..222a652 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -0,0 +1,57 @@ +package opentracing + +import "golang.org/x/net/context" + +type contextKey struct{} + +var activeSpanKey = contextKey{} + +// ContextWithSpan returns a new `context.Context` that holds a reference to +// `span`'s SpanContext. +func ContextWithSpan(ctx context.Context, span Span) context.Context { + return context.WithValue(ctx, activeSpanKey, span) +} + +// SpanFromContext returns the `Span` previously associated with `ctx`, or +// `nil` if no such `Span` could be found. +// +// NOTE: context.Context != SpanContext: the former is Go's intra-process +// context propagation mechanism, and the latter houses OpenTracing's per-Span +// identity and baggage information. +func SpanFromContext(ctx context.Context) Span { + val := ctx.Value(activeSpanKey) + if sp, ok := val.(Span); ok { + return sp + } + return nil +} + +// StartSpanFromContext starts and returns a Span with `operationName`, using +// any Span found within `ctx` as a ChildOfRef. If no such parent could be +// found, StartSpanFromContext creates a root (parentless) Span. +// +// The second return value is a context.Context object built around the +// returned Span. +// +// Example usage: +// +// SomeFunction(ctx context.Context, ...) { +// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") +// defer sp.Finish() +// ... +// } +func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { + return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) +} + +// startSpanFromContextWithTracer is factored out for testing purposes. 
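+// It behaves like StartSpanFromContext, except that the Tracer is supplied
+// explicitly rather than obtained from GlobalTracer().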
+func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { + var span Span + if parentSpan := SpanFromContext(ctx); parentSpan != nil { + opts = append(opts, ChildOf(parentSpan.Context())) + span = tracer.StartSpan(operationName, opts...) + } else { + span = tracer.StartSpan(operationName, opts...) + } + return span, ContextWithSpan(ctx, span) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go new file mode 100644 index 0000000..50feea3 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -0,0 +1,269 @@ +package log + +import ( + "fmt" + "math" +) + +type fieldType int + +const ( + stringType fieldType = iota + boolType + intType + int32Type + uint32Type + int64Type + uint64Type + float32Type + float64Type + errorType + objectType + lazyLoggerType + noopType +) + +// Field instances are constructed via LogBool, LogString, and so on. +// Tracing implementations may then handle them via the Field.Marshal +// method. +// +// "heavily influenced by" (i.e., partially stolen from) +// https://github.com/uber-go/zap +type Field struct { + key string + fieldType fieldType + numericVal int64 + stringVal string + interfaceVal interface{} +} + +// String adds a string-valued key:value pair to a Span.LogFields() record +func String(key, val string) Field { + return Field{ + key: key, + fieldType: stringType, + stringVal: val, + } +} + +// Bool adds a bool-valued key:value pair to a Span.LogFields() record +func Bool(key string, val bool) Field { + var numericVal int64 + if val { + numericVal = 1 + } + return Field{ + key: key, + fieldType: boolType, + numericVal: numericVal, + } +} + +// Int adds an int-valued key:value pair to a Span.LogFields() record +func Int(key string, val int) Field { + return Field{ + key: key, + fieldType: intType, + numericVal: int64(val), + } +} + +// Int32 adds an int32-valued key:value pair to a Span.LogFields() record +func Int32(key string, val int32) Field { + return Field{ + key: key, + fieldType: int32Type, + numericVal: int64(val), + } +} + +// Int64 adds an int64-valued key:value pair to a Span.LogFields() record +func Int64(key string, val int64) Field { + return Field{ + key: key, + fieldType: int64Type, + numericVal: val, + } +} + +// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record +func Uint32(key string, val uint32) Field { + return Field{ + key: key, + fieldType: uint32Type, + numericVal: int64(val), + } +} + +// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record +func Uint64(key string, val uint64) Field { + return Field{ + key: key, + fieldType: uint64Type, + numericVal: int64(val), + } +} + +// Float32 adds a float32-valued key:value pair to a Span.LogFields() record +func Float32(key string, val float32) Field { + return Field{ + key: key, + fieldType: float32Type, + numericVal: int64(math.Float32bits(val)), + } +} + +// Float64 adds a float64-valued key:value pair to a Span.LogFields() record +func Float64(key string, val float64) Field { + return Field{ + key: key, + fieldType: float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Error adds an error with the key "error" to a Span.LogFields() record +func Error(err error) Field { + return Field{ + key: "error", + fieldType: errorType, + interfaceVal: err, + } +} + +// Object adds an object-valued key:value pair to a Span.LogFields() record +func 
Object(key string, obj interface{}) Field { + return Field{ + key: key, + fieldType: objectType, + interfaceVal: obj, + } +} + +// LazyLogger allows for user-defined, late-bound logging of arbitrary data +type LazyLogger func(fv Encoder) + +// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing +// implementation will call the LazyLogger function at an indefinite time in +// the future (after Lazy() returns). +func Lazy(ll LazyLogger) Field { + return Field{ + fieldType: lazyLoggerType, + interfaceVal: ll, + } +} + +// Noop creates a no-op log field that should be ignored by the tracer. +// It can be used to capture optional fields, for example those that should +// only be logged in non-production environment: +// +// func customerField(order *Order) log.Field { +// if os.Getenv("ENVIRONMENT") == "dev" { +// return log.String("customer", order.Customer.ID) +// } +// return log.Noop() +// } +// +// span.LogFields(log.String("event", "purchase"), customerField(order)) +// +func Noop() Field { + return Field{ + fieldType: noopType, + } +} + +// Encoder allows access to the contents of a Field (via a call to +// Field.Marshal). +// +// Tracer implementations typically provide an implementation of Encoder; +// OpenTracing callers typically do not need to concern themselves with it. +type Encoder interface { + EmitString(key, value string) + EmitBool(key string, value bool) + EmitInt(key string, value int) + EmitInt32(key string, value int32) + EmitInt64(key string, value int64) + EmitUint32(key string, value uint32) + EmitUint64(key string, value uint64) + EmitFloat32(key string, value float32) + EmitFloat64(key string, value float64) + EmitObject(key string, value interface{}) + EmitLazyLogger(value LazyLogger) +} + +// Marshal passes a Field instance through to the appropriate +// field-type-specific method of an Encoder. +func (lf Field) Marshal(visitor Encoder) { + switch lf.fieldType { + case stringType: + visitor.EmitString(lf.key, lf.stringVal) + case boolType: + visitor.EmitBool(lf.key, lf.numericVal != 0) + case intType: + visitor.EmitInt(lf.key, int(lf.numericVal)) + case int32Type: + visitor.EmitInt32(lf.key, int32(lf.numericVal)) + case int64Type: + visitor.EmitInt64(lf.key, int64(lf.numericVal)) + case uint32Type: + visitor.EmitUint32(lf.key, uint32(lf.numericVal)) + case uint64Type: + visitor.EmitUint64(lf.key, uint64(lf.numericVal)) + case float32Type: + visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) + case float64Type: + visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) + case errorType: + if err, ok := lf.interfaceVal.(error); ok { + visitor.EmitString(lf.key, err.Error()) + } else { + visitor.EmitString(lf.key, "") + } + case objectType: + visitor.EmitObject(lf.key, lf.interfaceVal) + case lazyLoggerType: + visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) + case noopType: + // intentionally left blank + } +} + +// Key returns the field's key. +func (lf Field) Key() string { + return lf.key +} + +// Value returns the field's value as interface{}. 
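+// For example, Int("answer", 42).Value() yields int(42), and
+// Bool("ok", true).Value() yields true.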
+func (lf Field) Value() interface{} { + switch lf.fieldType { + case stringType: + return lf.stringVal + case boolType: + return lf.numericVal != 0 + case intType: + return int(lf.numericVal) + case int32Type: + return int32(lf.numericVal) + case int64Type: + return int64(lf.numericVal) + case uint32Type: + return uint32(lf.numericVal) + case uint64Type: + return uint64(lf.numericVal) + case float32Type: + return math.Float32frombits(uint32(lf.numericVal)) + case float64Type: + return math.Float64frombits(uint64(lf.numericVal)) + case errorType, objectType, lazyLoggerType: + return lf.interfaceVal + case noopType: + return nil + default: + return nil + } +} + +// String returns a string representation of the key and value. +func (lf Field) String() string { + return fmt.Sprint(lf.key, ":", lf.Value()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go new file mode 100644 index 0000000..3832feb --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -0,0 +1,54 @@ +package log + +import "fmt" + +// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice +// a la Span.LogFields(). +func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { + if len(keyValues)%2 != 0 { + return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) + } + fields := make([]Field, len(keyValues)/2) + for i := 0; i*2 < len(keyValues); i++ { + key, ok := keyValues[i*2].(string) + if !ok { + return nil, fmt.Errorf( + "non-string key (pair #%d): %T", + i, keyValues[i*2]) + } + switch typedVal := keyValues[i*2+1].(type) { + case bool: + fields[i] = Bool(key, typedVal) + case string: + fields[i] = String(key, typedVal) + case int: + fields[i] = Int(key, typedVal) + case int8: + fields[i] = Int32(key, int32(typedVal)) + case int16: + fields[i] = Int32(key, int32(typedVal)) + case int32: + fields[i] = Int32(key, typedVal) + case int64: + fields[i] = Int64(key, typedVal) + case uint: + fields[i] = Uint64(key, uint64(typedVal)) + case uint64: + fields[i] = Uint64(key, typedVal) + case uint8: + fields[i] = Uint32(key, uint32(typedVal)) + case uint16: + fields[i] = Uint32(key, uint32(typedVal)) + case uint32: + fields[i] = Uint32(key, typedVal) + case float32: + fields[i] = Float32(key, typedVal) + case float64: + fields[i] = Float64(key, typedVal) + default: + // When in doubt, coerce to a string + fields[i] = String(key, fmt.Sprint(typedVal)) + } + } + return fields, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go new file mode 100644 index 0000000..0d32f69 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/noop.go @@ -0,0 +1,64 @@ +package opentracing + +import "github.com/opentracing/opentracing-go/log" + +// A NoopTracer is a trivial, minimum overhead implementation of Tracer +// for which all operations are no-ops. +// +// The primary use of this implementation is in libraries, such as RPC +// frameworks, that make tracing an optional feature controlled by the +// end user. A no-op implementation allows said libraries to use it +// as the default Tracer and to write instrumentation that does +// not need to keep checking if the tracer instance is nil. +// +// For the same reason, the NoopTracer is the default "global" tracer +// (see GlobalTracer and SetGlobalTracer functions). +// +// WARNING: NoopTracer does not support baggage propagation. 
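+//
+// A minimal sketch of the default-tracer behavior described above:
+//
+//	tracer := opentracing.GlobalTracer() // a NoopTracer{} until SetGlobalTracer is called
+//	sp := tracer.StartSpan("operation")  // all data is discarded
+//	sp.Finish()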
+type NoopTracer struct{} + +type noopSpan struct{} +type noopSpanContext struct{} + +var ( + defaultNoopSpanContext = noopSpanContext{} + defaultNoopSpan = noopSpan{} + defaultNoopTracer = NoopTracer{} +) + +const ( + emptyString = "" +) + +// noopSpanContext: +func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +// noopSpan: +func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } +func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan } +func (n noopSpan) BaggageItem(key string) string { return emptyString } +func (n noopSpan) SetTag(key string, value interface{}) Span { return n } +func (n noopSpan) LogFields(fields ...log.Field) {} +func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) Finish() {} +func (n noopSpan) FinishWithOptions(opts FinishOptions) {} +func (n noopSpan) SetOperationName(operationName string) Span { return n } +func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } +func (n noopSpan) LogEvent(event string) {} +func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. +func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + return defaultNoopSpan +} + +// Inject belongs to the Tracer interface. +func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + return nil +} + +// Extract belongs to the Tracer interface. +func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go new file mode 100644 index 0000000..0dd466a --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -0,0 +1,176 @@ +package opentracing + +import ( + "errors" + "net/http" +) + +/////////////////////////////////////////////////////////////////////////////// +// CORE PROPAGATION INTERFACES: +/////////////////////////////////////////////////////////////////////////////// + +var ( + // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or + // Tracer.Extract() is not recognized by the Tracer implementation. + ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") + + // ErrSpanContextNotFound occurs when the `carrier` passed to + // Tracer.Extract() is valid and uncorrupted but has insufficient + // information to extract a SpanContext. + ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") + + // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to + // operate on a SpanContext which it is not prepared to handle (for + // example, since it was created by a different tracer implementation). + ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") + + // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() + // implementations expect a different type of `carrier` than they are + // given. + ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") + + // ErrSpanContextCorrupted occurs when the `carrier` passed to + // Tracer.Extract() is of the expected type but is corrupted. 
+ ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") +) + +/////////////////////////////////////////////////////////////////////////////// +// BUILTIN PROPAGATION FORMATS: +/////////////////////////////////////////////////////////////////////////////// + +// BuiltinFormat is used to demarcate the values within package `opentracing` +// that are intended for use with the Tracer.Inject() and Tracer.Extract() +// methods. +type BuiltinFormat byte + +const ( + // Binary represents SpanContexts as opaque binary data. + // + // For Tracer.Inject(): the carrier must be an `io.Writer`. + // + // For Tracer.Extract(): the carrier must be an `io.Reader`. + Binary BuiltinFormat = iota + + // TextMap represents SpanContexts as key:value string pairs. + // + // Unlike HTTPHeaders, the TextMap format does not restrict the key or + // value character sets in any way. + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + TextMap + + // HTTPHeaders represents SpanContexts as HTTP header string pairs. + // + // Unlike TextMap, the HTTPHeaders format requires that the keys and values + // be valid as HTTP headers as-is (i.e., character casing may be unstable + // and special characters are disallowed in keys, values should be + // URL-escaped, etc). + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + // + // See HTTPHeadersCarrier for an implementation of both TextMapWriter + // and TextMapReader that defers to an http.Header instance for storage. + // For example, Inject(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := span.Tracer().Inject( + // span.Context(), opentracing.HTTPHeaders, carrier) + // + // Or Extract(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract( + // opentracing.HTTPHeaders, carrier) + // + HTTPHeaders +) + +// TextMapWriter is the Inject() carrier for the TextMap builtin format. With +// it, the caller can encode a SpanContext for propagation as entries in a map +// of unicode strings. +type TextMapWriter interface { + // Set a key:value pair to the carrier. Multiple calls to Set() for the + // same key leads to undefined behavior. + // + // NOTE: The backing store for the TextMapWriter may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + Set(key, val string) +} + +// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, +// the caller can decode a propagated SpanContext as entries in a map of +// unicode strings. +type TextMapReader interface { + // ForeachKey returns TextMap contents via repeated calls to the `handler` + // function. If any call to `handler` returns a non-nil error, ForeachKey + // terminates and returns that error. + // + // NOTE: The backing store for the TextMapReader may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. 
+	//
+	// The "foreach" callback pattern reduces unnecessary copying in some cases
+	// and also allows implementations to hold locks while the map is read.
+	ForeachKey(handler func(key, val string) error) error
+}
+
+// TextMapCarrier allows the use of regular map[string]string
+// as both TextMapWriter and TextMapReader.
+type TextMapCarrier map[string]string
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, v := range c {
+		if err := handler(k, v); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// Set implements Set() of opentracing.TextMapWriter.
+func (c TextMapCarrier) Set(key, val string) {
+	c[key] = val
+}
+
+// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader.
+//
+// Example usage for server side:
+//
+//	carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+//	clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier)
+//
+// Example usage for client side:
+//
+//	carrier := opentracing.HTTPHeadersCarrier(httpReq.Header)
+//	err := tracer.Inject(
+//		span.Context(),
+//		opentracing.HTTPHeaders,
+//		carrier)
+//
+type HTTPHeadersCarrier http.Header

// Set conforms to the TextMapWriter interface.
+func (c HTTPHeadersCarrier) Set(key, val string) {
+	h := http.Header(c)
+	h.Add(key, val)
+}
+
+// ForeachKey conforms to the TextMapReader interface.
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error {
+	for k, vals := range c {
+		for _, v := range vals {
+			if err := handler(k, v); err != nil {
+				return err
+			}
+		}
+	}
+	return nil
+}
diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go
new file mode 100644
index 0000000..f6c3234
--- /dev/null
+++ b/vendor/github.com/opentracing/opentracing-go/span.go
@@ -0,0 +1,185 @@
+package opentracing
+
+import (
+	"time"
+
+	"github.com/opentracing/opentracing-go/log"
+)
+
+// SpanContext represents Span state that must propagate to descendant Spans and across process
+// boundaries (e.g., a <trace_id, span_id, sampled> tuple).
+type SpanContext interface {
+	// ForeachBaggageItem grants access to all baggage items stored in the
+	// SpanContext.
+	// The handler function will be called for each baggage key/value pair.
+	// The ordering of items is not guaranteed.
+	//
+	// The bool return value indicates if the handler wants to continue iterating
+	// through the rest of the baggage items; for example if the handler is trying to
+	// find some baggage item by pattern matching the name, it can return false
+	// as soon as the item is found to stop further iterations.
+	ForeachBaggageItem(handler func(k, v string) bool)
+}
+
+// Span represents an active, un-finished span in the OpenTracing system.
+//
+// Spans are created by the Tracer interface.
+type Span interface {
+	// Sets the end timestamp and finalizes Span state.
+	//
+	// With the exception of calls to Context() (which are always allowed),
+	// Finish() must be the last call made to any span instance, and to do
+	// otherwise leads to undefined behavior.
+	Finish()
+	// FinishWithOptions is like Finish() but with explicit control over
+	// timestamps and log data.
+	FinishWithOptions(opts FinishOptions)
+
+	// Context() yields the SpanContext for this Span. Note that the return
+	// value of Context() is still valid after a call to Span.Finish(), as is
+	// a call to Span.Context() after a call to Span.Finish().
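+	//
+	// For example, it remains valid to inject a finished Span's context
+	// (carrier here is as in the Inject examples elsewhere in this package):
+	//
+	//	sp.Finish()
+	//	err := sp.Tracer().Inject(sp.Context(), opentracing.HTTPHeaders, carrier)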
+ Context() SpanContext + + // Sets or changes the operation name. + SetOperationName(operationName string) Span + + // Adds a tag to the span. + // + // If there is a pre-existing tag set for `key`, it is overwritten. + // + // Tag values can be numeric types, strings, or bools. The behavior of + // other tag value types is undefined at the OpenTracing level. If a + // tracing system does not know how to handle a particular value type, it + // may ignore the tag, but shall not panic. + SetTag(key string, value interface{}) Span + + // LogFields is an efficient and type-checked way to record key:value + // logging data about a Span, though the programming interface is a little + // more verbose than LogKV(). Here's an example: + // + // span.LogFields( + // log.String("event", "soft error"), + // log.String("type", "cache timeout"), + // log.Int("waited.millis", 1500)) + // + // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. + LogFields(fields ...log.Field) + + // LogKV is a concise, readable way to record key:value logging data about + // a Span, though unfortunately this also makes it less efficient and less + // type-safe than LogFields(). Here's an example: + // + // span.LogKV( + // "event", "soft error", + // "type", "cache timeout", + // "waited.millis", 1500) + // + // For LogKV (as opposed to LogFields()), the parameters must appear as + // key-value pairs, like + // + // span.LogKV(key1, val1, key2, val2, key3, val3, ...) + // + // The keys must all be strings. The values may be strings, numeric types, + // bools, Go error instances, or arbitrary structs. + // + // (Note to implementors: consider the log.InterleavedKVToFields() helper) + LogKV(alternatingKeyValues ...interface{}) + + // SetBaggageItem sets a key:value pair on this Span and its SpanContext + // that also propagates to descendants of this Span. + // + // SetBaggageItem() enables powerful functionality given a full-stack + // opentracing integration (e.g., arbitrary application data from a mobile + // app can make it, transparently, all the way into the depths of a storage + // system), and with it some powerful costs: use this feature with care. + // + // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to + // *future* causal descendants of the associated Span. + // + // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and + // value is copied into every local *and remote* child of the associated + // Span, and that can add up to a lot of network and cpu overhead. + // + // Returns a reference to this Span for chaining. + SetBaggageItem(restrictedKey, value string) Span + + // Gets the value for a baggage item given its key. Returns the empty string + // if the value isn't found in this Span. + BaggageItem(restrictedKey string) string + + // Provides access to the Tracer that created this Span. + Tracer() Tracer + + // Deprecated: use LogFields or LogKV + LogEvent(event string) + // Deprecated: use LogFields or LogKV + LogEventWithPayload(event string, payload interface{}) + // Deprecated: use LogFields or LogKV + Log(data LogData) +} + +// LogRecord is data associated with a single Span log. Every LogRecord +// instance must specify at least one Field. +type LogRecord struct { + Timestamp time.Time + Fields []log.Field +} + +// FinishOptions allows Span.FinishWithOptions callers to override the finish +// timestamp and provide log data via a bulk interface. 
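+//
+// A brief sketch of a FinishWithOptions call:
+//
+//	endTime := time.Now()
+//	span.FinishWithOptions(opentracing.FinishOptions{FinishTime: endTime})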
+type FinishOptions struct { + // FinishTime overrides the Span's finish time, or implicitly becomes + // time.Now() if FinishTime.IsZero(). + // + // FinishTime must resolve to a timestamp that's >= the Span's StartTime + // (per StartSpanOptions). + FinishTime time.Time + + // LogRecords allows the caller to specify the contents of many LogFields() + // calls with a single slice. May be nil. + // + // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must + // be set explicitly). Also, they must be >= the Span's start timestamp and + // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the + // behavior of FinishWithOptions() is undefined. + // + // If specified, the caller hands off ownership of LogRecords at + // FinishWithOptions() invocation time. + // + // If specified, the (deprecated) BulkLogData must be nil or empty. + LogRecords []LogRecord + + // BulkLogData is DEPRECATED. + BulkLogData []LogData +} + +// LogData is DEPRECATED +type LogData struct { + Timestamp time.Time + Event string + Payload interface{} +} + +// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord +func (ld *LogData) ToLogRecord() LogRecord { + var literalTimestamp time.Time + if ld.Timestamp.IsZero() { + literalTimestamp = time.Now() + } else { + literalTimestamp = ld.Timestamp + } + rval := LogRecord{ + Timestamp: literalTimestamp, + } + if ld.Payload == nil { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + } + } else { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + log.Object("payload", ld.Payload), + } + } + return rval +} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go new file mode 100644 index 0000000..7bca1f7 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -0,0 +1,305 @@ +package opentracing + +import "time" + +// Tracer is a simple, thin interface for Span creation and SpanContext +// propagation. +type Tracer interface { + + // Create, start, and return a new Span with the given `operationName` and + // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows + // from the "functional options" pattern, per + // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) + // + // A Span with no SpanReference options (e.g., opentracing.ChildOf() or + // opentracing.FollowsFrom()) becomes the root of its own trace. + // + // Examples: + // + // var tracer opentracing.Tracer = ... + // + // // The root-span case: + // sp := tracer.StartSpan("GetFeed") + // + // // The vanilla child span case: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context())) + // + // // All the bells and whistles: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context()), + // opentracing.Tag{"user_agent", loggedReq.UserAgent}, + // opentracing.StartTime(loggedReq.Timestamp), + // ) + // + StartSpan(operationName string, opts ...StartSpanOption) Span + + // Inject() takes the `sm` SpanContext instance and injects it for + // propagation within `carrier`. The actual type of `carrier` depends on + // the value of `format`. + // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). 
+ // + // Example usage (sans error handling): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := tracer.Inject( + // span.Context(), + // opentracing.HTTPHeaders, + // carrier) + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Implementations may return opentracing.ErrUnsupportedFormat if `format` + // is not supported by (or not known by) the implementation. + // + // Implementations may return opentracing.ErrInvalidCarrier or any other + // implementation-specific error if the format is supported but injection + // fails anyway. + // + // See Tracer.Extract(). + Inject(sm SpanContext, format interface{}, carrier interface{}) error + + // Extract() returns a SpanContext instance given `format` and `carrier`. + // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). + // + // Example usage (with StartSpan): + // + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) + // + // // ... assuming the ultimate goal here is to resume the trace with a + // // server-side Span: + // var serverSpan opentracing.Span + // if err == nil { + // span = tracer.StartSpan( + // rpcMethodName, ext.RPCServerOption(clientContext)) + // } else { + // span = tracer.StartSpan(rpcMethodName) + // } + // + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Return values: + // - A successful Extract returns a SpanContext instance and a nil error + // - If there was simply no SpanContext to extract in `carrier`, Extract() + // returns (nil, opentracing.ErrSpanContextNotFound) + // - If `format` is unsupported or unrecognized, Extract() returns (nil, + // opentracing.ErrUnsupportedFormat) + // - If there are more fundamental problems with the `carrier` object, + // Extract() may return opentracing.ErrInvalidCarrier, + // opentracing.ErrSpanContextCorrupted, or implementation-specific + // errors. + // + // See Tracer.Inject(). + Extract(format interface{}, carrier interface{}) (SpanContext, error) +} + +// StartSpanOptions allows Tracer.StartSpan() callers and implementors a +// mechanism to override the start timestamp, specify Span References, and make +// a single Tag or multiple Tags available at Span start time. +// +// StartSpan() callers should look at the StartSpanOption interface and +// implementations available in this package. +// +// Tracer implementations can convert a slice of `StartSpanOption` instances +// into a `StartSpanOptions` struct like so: +// +// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { +// sso := opentracing.StartSpanOptions{} +// for _, o := range opts { +// o.Apply(&sso) +// } +// ... +// } +// +type StartSpanOptions struct { + // Zero or more causal references to other Spans (via their SpanContext). + // If empty, start a "root" Span (i.e., start a new trace). + References []SpanReference + + // StartTime overrides the Span's start time, or implicitly becomes + // time.Now() if StartTime.IsZero(). + StartTime time.Time + + // Tags may have zero or more entries; the restrictions on map values are + // identical to those for Span.SetTag(). May be nil. 
+	//
+	// If specified, the caller hands off ownership of Tags at
+	// StartSpan() invocation time.
+	Tags map[string]interface{}
+}
+
+// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan.
+//
+// StartSpanOption borrows from the "functional options" pattern, per
+// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
+type StartSpanOption interface {
+	Apply(*StartSpanOptions)
+}
+
+// SpanReferenceType is an enum type describing different categories of
+// relationships between two Spans. If Span-2 refers to Span-1, the
+// SpanReferenceType describes Span-1 from Span-2's perspective. For example,
+// ChildOfRef means that Span-1 created Span-2.
+//
+// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for
+// completion; e.g., Span-2 may be part of a background job enqueued by Span-1,
+// or Span-2 may be sitting in a distributed queue behind Span-1.
+type SpanReferenceType int
+
+const (
+	// ChildOfRef refers to a parent Span that caused *and* somehow depends
+	// upon the new child Span. Often (but not always), the parent Span cannot
+	// finish until the child Span does.
+	//
+	// A timing diagram for a ChildOfRef that's blocked on the new Span:
+	//
+	//	[-Parent Span---------]
+	//	     [-Child Span----]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.ChildOf()
+	ChildOfRef SpanReferenceType = iota
+
+	// FollowsFromRef refers to a parent Span that does not depend in any way
+	// on the result of the new child Span. For instance, one might use
+	// FollowsFromRefs to describe pipeline stages separated by queues,
+	// or a fire-and-forget cache insert at the tail end of a web request.
+	//
+	// A FollowsFromRef Span is part of the same logical trace as the new Span:
+	// i.e., the new Span is somehow caused by the work of its FollowsFromRef.
+	//
+	// All of the following could be valid timing diagrams for children that
+	// "FollowsFrom" a parent.
+	//
+	//	[-Parent Span-]  [-Child Span-]
+	//
+	//	[-Parent Span--]
+	//	 [-Child Span-]
+	//
+	//	[-Parent Span-]
+	//	            [-Child Span-]
+	//
+	// See http://opentracing.io/spec/
+	//
+	// See opentracing.FollowsFrom()
+	FollowsFromRef
+)
+
+// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a
+// referenced SpanContext. See the SpanReferenceType documentation for
+// supported relationships. If SpanReference is created with
+// ReferencedContext==nil, it has no effect. Thus it allows for a more concise
+// syntax for starting spans:
+//
+//	sc, _ := tracer.Extract(someFormat, someCarrier)
+//	span := tracer.StartSpan("operation", opentracing.ChildOf(sc))
+//
+// The `ChildOf(sc)` option above will not panic if sc == nil; it will just
+// not add the parent span reference to the options.
+type SpanReference struct {
+	Type              SpanReferenceType
+	ReferencedContext SpanContext
+}
+
+// Apply satisfies the StartSpanOption interface.
+func (r SpanReference) Apply(o *StartSpanOptions) {
+	if r.ReferencedContext != nil {
+		o.References = append(o.References, r)
+	}
+}
+
+// ChildOf returns a StartSpanOption pointing to a dependent parent span.
+// If sc == nil, the option has no effect.
+//
+// See ChildOfRef, SpanReference
+func ChildOf(sc SpanContext) SpanReference {
+	return SpanReference{
+		Type:              ChildOfRef,
+		ReferencedContext: sc,
+	}
+}
+
+// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused
+// the child Span but does not directly depend on its result in any way.
+// If sc == nil, the option has no effect.
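+//
+// For example, a queue consumer's span might follow from the producer's
+// context (a sketch; producerCtx is assumed to have been extracted earlier):
+//
+//	span := tracer.StartSpan("consume", opentracing.FollowsFrom(producerCtx))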
+// +// See FollowsFromRef, SpanReference +func FollowsFrom(sc SpanContext) SpanReference { + return SpanReference{ + Type: FollowsFromRef, + ReferencedContext: sc, + } +} + +// StartTime is a StartSpanOption that sets an explicit start timestamp for the +// new Span. +type StartTime time.Time + +// Apply satisfies the StartSpanOption interface. +func (t StartTime) Apply(o *StartSpanOptions) { + o.StartTime = time.Time(t) +} + +// Tags are a generic map from an arbitrary string key to an opaque value type. +// The underlying tracing system is responsible for interpreting and +// serializing the values. +type Tags map[string]interface{} + +// Apply satisfies the StartSpanOption interface. +func (t Tags) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + for k, v := range t { + o.Tags[k] = v + } +} + +// Tag may be passed as a StartSpanOption to add a tag to new spans, +// or its Set method may be used to apply the tag to an existing Span, +// for example: +// +// tracer.StartSpan("opName", Tag{"Key", value}) +// +// or +// +// Tag{"key", value}.Set(span) +type Tag struct { + Key string + Value interface{} +} + +// Apply satisfies the StartSpanOption interface. +func (t Tag) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + o.Tags[t.Key] = t.Value +} + +// Set applies the tag to an existing Span. +func (t Tag) Set(s Span) { + s.SetTag(t.Key, t.Value) +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go new file mode 100644 index 0000000..6fb7b8c --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go @@ -0,0 +1,234 @@ +package zipkintracer + +import ( + "bytes" + "net/http" + "sync" + "time" + + "github.com/apache/thrift/lib/go/thrift" + + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" +) + +// Default timeout for http request in seconds +const defaultHTTPTimeout = time.Second * 5 + +// defaultBatchInterval in seconds +const defaultHTTPBatchInterval = 1 + +const defaultHTTPBatchSize = 100 + +const defaultHTTPMaxBacklog = 1000 + +// HTTPCollector implements Collector by forwarding spans to a http server. +type HTTPCollector struct { + logger Logger + url string + client *http.Client + batchInterval time.Duration + batchSize int + maxBacklog int + batch []*zipkincore.Span + spanc chan *zipkincore.Span + quit chan struct{} + shutdown chan error + sendMutex *sync.Mutex + batchMutex *sync.Mutex + reqCallback RequestCallback +} + +// RequestCallback receives the initialized request from the Collector before +// sending it over the wire. This allows one to plug in additional headers or +// do other customization. +type RequestCallback func(*http.Request) + +// HTTPOption sets a parameter for the HttpCollector +type HTTPOption func(c *HTTPCollector) + +// HTTPLogger sets the logger used to report errors in the collection +// process. By default, a no-op logger is used, i.e. no errors are logged +// anywhere. It's important to set this option in a production service. +func HTTPLogger(logger Logger) HTTPOption { + return func(c *HTTPCollector) { c.logger = logger } +} + +// HTTPTimeout sets maximum timeout for http request. +func HTTPTimeout(duration time.Duration) HTTPOption { + return func(c *HTTPCollector) { c.client.Timeout = duration } +} + +// HTTPBatchSize sets the maximum batch size, after which a collect will be +// triggered. 
The default batch size is 100 traces.
+func HTTPBatchSize(n int) HTTPOption {
+	return func(c *HTTPCollector) { c.batchSize = n }
+}
+
+// HTTPMaxBacklog sets the maximum backlog size. When the batch reaches this
+// threshold, spans from the beginning of the batch will be disposed of.
+func HTTPMaxBacklog(n int) HTTPOption {
+	return func(c *HTTPCollector) { c.maxBacklog = n }
+}
+
+// HTTPBatchInterval sets the maximum duration we will buffer traces before
+// emitting them to the collector. The default batch interval is 1 second.
+func HTTPBatchInterval(d time.Duration) HTTPOption {
+	return func(c *HTTPCollector) { c.batchInterval = d }
+}
+
+// HTTPClient sets a custom http client to use.
+func HTTPClient(client *http.Client) HTTPOption {
+	return func(c *HTTPCollector) { c.client = client }
+}
+
+// HTTPRequestCallback registers a callback function to adjust the collector
+// *http.Request before it sends the request to Zipkin.
+func HTTPRequestCallback(rc RequestCallback) HTTPOption {
+	return func(c *HTTPCollector) { c.reqCallback = rc }
+}
+
+// NewHTTPCollector returns a new HTTP-backed Collector. url should be an HTTP
+// URL that accepts POSTed spans. The request timeout is set via HTTPTimeout
+// and the maximum size of the asynchronous span buffer via HTTPMaxBacklog.
+// The logger is used to log errors, such as send failures.
+func NewHTTPCollector(url string, options ...HTTPOption) (Collector, error) {
+	c := &HTTPCollector{
+		logger:        NewNopLogger(),
+		url:           url,
+		client:        &http.Client{Timeout: defaultHTTPTimeout},
+		batchInterval: defaultHTTPBatchInterval * time.Second,
+		batchSize:     defaultHTTPBatchSize,
+		maxBacklog:    defaultHTTPMaxBacklog,
+		batch:         []*zipkincore.Span{},
+		spanc:         make(chan *zipkincore.Span),
+		quit:          make(chan struct{}, 1),
+		shutdown:      make(chan error, 1),
+		sendMutex:     &sync.Mutex{},
+		batchMutex:    &sync.Mutex{},
+	}
+
+	for _, option := range options {
+		option(c)
+	}
+
+	go c.loop()
+	return c, nil
+}
+
+// Collect implements Collector.
+func (c *HTTPCollector) Collect(s *zipkincore.Span) error {
+	c.spanc <- s
+	return nil
+}
+
+// Close implements Collector.
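+// Close signals the background loop to send any buffered spans and blocks
+// until that final send attempt completes, returning its error, if any.
+//
+// Example usage (a sketch; the endpoint URL is illustrative and error
+// handling is elided):
+//
+//     collector, _ := NewHTTPCollector("http://localhost:9411/api/v1/spans")
+//     defer collector.Close()
+//     _ = collector.Collect(span) // span is an existing *zipkincore.Span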
+func (c *HTTPCollector) Close() error { + close(c.quit) + return <-c.shutdown +} + +func httpSerialize(spans []*zipkincore.Span) *bytes.Buffer { + t := thrift.NewTMemoryBuffer() + p := thrift.NewTBinaryProtocolTransport(t) + if err := p.WriteListBegin(thrift.STRUCT, len(spans)); err != nil { + panic(err) + } + for _, s := range spans { + if err := s.Write(p); err != nil { + panic(err) + } + } + if err := p.WriteListEnd(); err != nil { + panic(err) + } + return t.Buffer +} + +func (c *HTTPCollector) loop() { + var ( + nextSend = time.Now().Add(c.batchInterval) + ticker = time.NewTicker(c.batchInterval / 10) + tickc = ticker.C + ) + defer ticker.Stop() + + for { + select { + case span := <-c.spanc: + currentBatchSize := c.append(span) + if currentBatchSize >= c.batchSize { + nextSend = time.Now().Add(c.batchInterval) + go c.send() + } + case <-tickc: + if time.Now().After(nextSend) { + nextSend = time.Now().Add(c.batchInterval) + go c.send() + } + case <-c.quit: + c.shutdown <- c.send() + return + } + } +} + +func (c *HTTPCollector) append(span *zipkincore.Span) (newBatchSize int) { + c.batchMutex.Lock() + defer c.batchMutex.Unlock() + + c.batch = append(c.batch, span) + if len(c.batch) > c.maxBacklog { + dispose := len(c.batch) - c.maxBacklog + c.logger.Log("msg", "backlog too long, disposing spans.", "count", dispose) + c.batch = c.batch[dispose:] + } + newBatchSize = len(c.batch) + return +} + +func (c *HTTPCollector) send() error { + // in order to prevent sending the same batch twice + c.sendMutex.Lock() + defer c.sendMutex.Unlock() + + // Select all current spans in the batch to be sent + c.batchMutex.Lock() + sendBatch := c.batch[:] + c.batchMutex.Unlock() + + // Do not send an empty batch + if len(sendBatch) == 0 { + return nil + } + + req, err := http.NewRequest( + "POST", + c.url, + httpSerialize(sendBatch)) + if err != nil { + c.logger.Log("err", err.Error()) + return err + } + req.Header.Set("Content-Type", "application/x-thrift") + if c.reqCallback != nil { + c.reqCallback(req) + } + resp, err := c.client.Do(req) + if err != nil { + c.logger.Log("err", err.Error()) + return err + } + resp.Body.Close() + // non 2xx code + if resp.StatusCode < 200 || resp.StatusCode >= 300 { + c.logger.Log("err", "HTTP POST span failed", "code", resp.Status) + } + + // Remove sent spans from the batch + c.batchMutex.Lock() + c.batch = c.batch[len(sendBatch):] + c.batchMutex.Unlock() + + return nil +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go new file mode 100644 index 0000000..eb18c3f --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go @@ -0,0 +1,95 @@ +package zipkintracer + +import ( + "github.com/Shopify/sarama" + "github.com/apache/thrift/lib/go/thrift" + + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" +) + +// defaultKafkaTopic sets the standard Kafka topic our Collector will publish +// on. The default topic for zipkin-receiver-kafka is "zipkin", see: +// https://github.com/openzipkin/zipkin/tree/master/zipkin-receiver-kafka +const defaultKafkaTopic = "zipkin" + +// KafkaCollector implements Collector by publishing spans to a Kafka +// broker. 
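+//
+// Example usage (a sketch; the broker address is illustrative and error
+// handling is elided):
+//
+//     collector, _ := NewKafkaCollector([]string{"kafka-1:9092"})
+//     defer collector.Close()
+//     _ = collector.Collect(span) // span is an existing *zipkincore.Span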
+type KafkaCollector struct { + producer sarama.AsyncProducer + logger Logger + topic string +} + +// KafkaOption sets a parameter for the KafkaCollector +type KafkaOption func(c *KafkaCollector) + +// KafkaLogger sets the logger used to report errors in the collection +// process. By default, a no-op logger is used, i.e. no errors are logged +// anywhere. It's important to set this option. +func KafkaLogger(logger Logger) KafkaOption { + return func(c *KafkaCollector) { c.logger = logger } +} + +// KafkaProducer sets the producer used to produce to Kafka. +func KafkaProducer(p sarama.AsyncProducer) KafkaOption { + return func(c *KafkaCollector) { c.producer = p } +} + +// KafkaTopic sets the kafka topic to attach the collector producer on. +func KafkaTopic(t string) KafkaOption { + return func(c *KafkaCollector) { c.topic = t } +} + +// NewKafkaCollector returns a new Kafka-backed Collector. addrs should be a +// slice of TCP endpoints of the form "host:port". +func NewKafkaCollector(addrs []string, options ...KafkaOption) (Collector, error) { + c := &KafkaCollector{ + logger: NewNopLogger(), + topic: defaultKafkaTopic, + } + + for _, option := range options { + option(c) + } + if c.producer == nil { + p, err := sarama.NewAsyncProducer(addrs, nil) + if err != nil { + return nil, err + } + c.producer = p + } + + go c.logErrors() + + return c, nil +} + +func (c *KafkaCollector) logErrors() { + for pe := range c.producer.Errors() { + _ = c.logger.Log("msg", pe.Msg, "err", pe.Err, "result", "failed to produce msg") + } +} + +// Collect implements Collector. +func (c *KafkaCollector) Collect(s *zipkincore.Span) error { + c.producer.Input() <- &sarama.ProducerMessage{ + Topic: c.topic, + Key: nil, + Value: sarama.ByteEncoder(kafkaSerialize(s)), + } + return nil +} + +// Close implements Collector. +func (c *KafkaCollector) Close() error { + return c.producer.Close() +} + +func kafkaSerialize(s *zipkincore.Span) []byte { + t := thrift.NewTMemoryBuffer() + p := thrift.NewTBinaryProtocolTransport(t) + if err := s.Write(p); err != nil { + panic(err) + } + return t.Buffer.Bytes() +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go new file mode 100644 index 0000000..1ff353a --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go @@ -0,0 +1,235 @@ +package zipkintracer + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "sync" + "time" + + "github.com/apache/thrift/lib/go/thrift" + + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe" + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" +) + +const defaultScribeCategory = "zipkin" + +// defaultScribeBatchInterval in seconds +const defaultScribeBatchInterval = 1 + +const defaultScribeBatchSize = 100 + +const defaultScribeMaxBacklog = 1000 + +// ScribeCollector implements Collector by forwarding spans to a Scribe +// service, in batches. +type ScribeCollector struct { + logger Logger + category string + factory func() (scribe.Scribe, error) + client scribe.Scribe + batchInterval time.Duration + batchSize int + maxBacklog int + batch []*scribe.LogEntry + spanc chan *zipkincore.Span + quit chan struct{} + shutdown chan error + sendMutex *sync.Mutex + batchMutex *sync.Mutex +} + +// ScribeOption sets a parameter for the StdlibAdapter. +type ScribeOption func(s *ScribeCollector) + +// ScribeLogger sets the logger used to report errors in the collection +// process. 
By default, a no-op logger is used, i.e. no errors are logged +// anywhere. It's important to set this option in a production service. +func ScribeLogger(logger Logger) ScribeOption { + return func(s *ScribeCollector) { s.logger = logger } +} + +// ScribeBatchSize sets the maximum batch size, after which a collect will be +// triggered. The default batch size is 100 traces. +func ScribeBatchSize(n int) ScribeOption { + return func(s *ScribeCollector) { s.batchSize = n } +} + +// ScribeMaxBacklog sets the maximum backlog size, +// when batch size reaches this threshold, spans from the +// beginning of the batch will be disposed +func ScribeMaxBacklog(n int) ScribeOption { + return func(c *ScribeCollector) { c.maxBacklog = n } +} + +// ScribeBatchInterval sets the maximum duration we will buffer traces before +// emitting them to the collector. The default batch interval is 1 second. +func ScribeBatchInterval(d time.Duration) ScribeOption { + return func(s *ScribeCollector) { s.batchInterval = d } +} + +// ScribeCategory sets the Scribe category used to transmit the spans. +func ScribeCategory(category string) ScribeOption { + return func(s *ScribeCollector) { s.category = category } +} + +// NewScribeCollector returns a new Scribe-backed Collector. addr should be a +// TCP endpoint of the form "host:port". timeout is passed to the Thrift dial +// function NewTSocketFromAddrTimeout. batchSize and batchInterval control the +// maximum size and interval of a batch of spans; as soon as either limit is +// reached, the batch is sent. The logger is used to log errors, such as batch +// send failures; users should provide an appropriate context, if desired. +func NewScribeCollector(addr string, timeout time.Duration, options ...ScribeOption) (Collector, error) { + factory := scribeClientFactory(addr, timeout) + client, err := factory() + if err != nil { + return nil, err + } + c := &ScribeCollector{ + logger: NewNopLogger(), + category: defaultScribeCategory, + factory: factory, + client: client, + batchInterval: defaultScribeBatchInterval * time.Second, + batchSize: defaultScribeBatchSize, + maxBacklog: defaultScribeMaxBacklog, + batch: []*scribe.LogEntry{}, + spanc: make(chan *zipkincore.Span), + quit: make(chan struct{}), + shutdown: make(chan error, 1), + sendMutex: &sync.Mutex{}, + batchMutex: &sync.Mutex{}, + } + + for _, option := range options { + option(c) + } + + go c.loop() + return c, nil +} + +// Collect implements Collector. +func (c *ScribeCollector) Collect(s *zipkincore.Span) error { + c.spanc <- s + return nil // accepted +} + +// Close implements Collector. 
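+// Close flushes the final batch to Scribe and blocks until that send attempt
+// has completed, returning its error, if any.
+//
+// Example usage (a sketch; the address and timeout are illustrative and error
+// handling is elided):
+//
+//     collector, _ := NewScribeCollector("scribe-host:1463", 5*time.Second)
+//     defer collector.Close()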
+func (c *ScribeCollector) Close() error { + close(c.quit) + return <-c.shutdown +} + +func scribeSerialize(s *zipkincore.Span) string { + t := thrift.NewTMemoryBuffer() + p := thrift.NewTBinaryProtocolTransport(t) + if err := s.Write(p); err != nil { + panic(err) + } + return base64.StdEncoding.EncodeToString(t.Buffer.Bytes()) +} + +func (c *ScribeCollector) loop() { + var ( + nextSend = time.Now().Add(c.batchInterval) + ticker = time.NewTicker(c.batchInterval / 10) + tickc = ticker.C + ) + defer ticker.Stop() + + for { + select { + case span := <-c.spanc: + currentBatchSize := c.append(span) + if currentBatchSize >= c.batchSize { + nextSend = time.Now().Add(c.batchInterval) + go c.send() + } + case <-tickc: + if time.Now().After(nextSend) { + nextSend = time.Now().Add(c.batchInterval) + go c.send() + } + case <-c.quit: + c.shutdown <- c.send() + return + } + } +} + +func (c *ScribeCollector) append(span *zipkincore.Span) (newBatchSize int) { + c.batchMutex.Lock() + defer c.batchMutex.Unlock() + + c.batch = append(c.batch, &scribe.LogEntry{ + Category: c.category, + Message: scribeSerialize(span), + }) + if len(c.batch) > c.maxBacklog { + dispose := len(c.batch) - c.maxBacklog + c.logger.Log("Backlog too long, disposing spans.", "count", dispose) + c.batch = c.batch[dispose:] + } + newBatchSize = len(c.batch) + return +} + +func (c *ScribeCollector) send() error { + // in order to prevent sending the same batch twice + c.sendMutex.Lock() + defer c.sendMutex.Unlock() + + // Select all current spans in the batch to be sent + c.batchMutex.Lock() + sendBatch := c.batch[:] + c.batchMutex.Unlock() + + // Do not send an empty batch + if len(sendBatch) == 0 { + return nil + } + + if c.client == nil { + var err error + if c.client, err = c.factory(); err != nil { + _ = c.logger.Log("err", fmt.Sprintf("during reconnect: %v", err)) + return err + } + } + if rc, err := c.client.Log(context.Background(), sendBatch); err != nil { + c.client = nil + _ = c.logger.Log("err", fmt.Sprintf("during Log: %v", err)) + return err + } else if rc != scribe.ResultCode_OK { + // probably transient error; don't reset client + _ = c.logger.Log("err", fmt.Sprintf("remote returned %s", rc)) + } + + // Remove sent spans from the batch + c.batchMutex.Lock() + c.batch = c.batch[len(sendBatch):] + c.batchMutex.Unlock() + + return nil +} + +func scribeClientFactory(addr string, timeout time.Duration) func() (scribe.Scribe, error) { + return func() (scribe.Scribe, error) { + a, err := net.ResolveTCPAddr("tcp", addr) + if err != nil { + return nil, err + } + socket := thrift.NewTSocketFromAddrTimeout(a, timeout) + transport := thrift.NewTFramedTransport(socket) + if err := transport.Open(); err != nil { + _ = socket.Close() + return nil, err + } + proto := thrift.NewTBinaryProtocolTransport(transport) + client := scribe.NewScribeClientProtocol(transport, proto, proto) + return client, nil + } +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go new file mode 100644 index 0000000..f8cfb58 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go @@ -0,0 +1,77 @@ +package zipkintracer + +import ( + "strings" + + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" +) + +// Collector represents a Zipkin trace collector, which is probably a set of +// remote endpoints. 
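+//
+// A sketch of a trivial custom implementation (illustrative only):
+//
+//     type countingCollector struct{ n int }
+//
+//     func (c *countingCollector) Collect(*zipkincore.Span) error { c.n++; return nil }
+//     func (c *countingCollector) Close() error                   { return nil }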
+type Collector interface { + Collect(*zipkincore.Span) error + Close() error +} + +// NopCollector implements Collector but performs no work. +type NopCollector struct{} + +// Collect implements Collector. +func (NopCollector) Collect(*zipkincore.Span) error { return nil } + +// Close implements Collector. +func (NopCollector) Close() error { return nil } + +// MultiCollector implements Collector by sending spans to all collectors. +type MultiCollector []Collector + +// Collect implements Collector. +func (c MultiCollector) Collect(s *zipkincore.Span) error { + return c.aggregateErrors(func(coll Collector) error { return coll.Collect(s) }) +} + +// Close implements Collector. +func (c MultiCollector) Close() error { + return c.aggregateErrors(func(coll Collector) error { return coll.Close() }) +} + +func (c MultiCollector) aggregateErrors(f func(c Collector) error) error { + var e *collectionError + for i, collector := range c { + if err := f(collector); err != nil { + if e == nil { + e = &collectionError{ + errs: make([]error, len(c)), + } + } + e.errs[i] = err + } + } + return e +} + +// CollectionError represents an array of errors returned by one or more +// failed Collector methods. +type CollectionError interface { + Error() string + GetErrors() []error +} + +type collectionError struct { + errs []error +} + +func (c *collectionError) Error() string { + errs := []string{} + for _, err := range c.errs { + if err != nil { + errs = append(errs, err.Error()) + } + } + return strings.Join(errs, "; ") +} + +// GetErrors implements CollectionError +func (c *collectionError) GetErrors() []error { + return c.errs +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go new file mode 100644 index 0000000..e9fe299 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go @@ -0,0 +1,61 @@ +package zipkintracer + +import ( + "github.com/openzipkin/zipkin-go-opentracing/flag" + "github.com/openzipkin/zipkin-go-opentracing/types" +) + +// SpanContext holds the basic Span metadata. +type SpanContext struct { + // A probabilistically unique identifier for a [multi-span] trace. + TraceID types.TraceID + + // A probabilistically unique identifier for a span. + SpanID uint64 + + // Whether the trace is sampled. + Sampled bool + + // The span's associated baggage. + Baggage map[string]string // initialized on first use + + // The SpanID of this Context's parent, or nil if there is no parent. + ParentSpanID *uint64 + + // Flags provides the ability to create and communicate feature flags. + Flags flag.Flags + + // Whether the span is owned by the current process + Owner bool +} + +// ForeachBaggageItem belongs to the opentracing.SpanContext interface +func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { + for k, v := range c.Baggage { + if !handler(k, v) { + break + } + } +} + +// WithBaggageItem returns an entirely new basictracer SpanContext with the +// given key:value baggage pair set. 
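+//
+// The receiver is left unmodified; for example (key and value illustrative):
+//
+//     sc2 := sc.WithBaggageItem("user_id", "42")
+//     // sc.Baggage is unchanged; sc2.Baggage["user_id"] == "42"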
+func (c SpanContext) WithBaggageItem(key, val string) SpanContext { + var newBaggage map[string]string + if c.Baggage == nil { + newBaggage = map[string]string{key: val} + } else { + newBaggage = make(map[string]string, len(c.Baggage)+1) + for k, v := range c.Baggage { + newBaggage[k] = v + } + newBaggage[key] = val + } + var parentSpanID *uint64 + if c.ParentSpanID != nil { + parentSpanID = new(uint64) + *parentSpanID = *c.ParentSpanID + } + // Use positional parameters so the compiler will help catch new fields. + return SpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage, parentSpanID, c.Flags, c.Owner} +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go new file mode 100644 index 0000000..1ee00c8 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go @@ -0,0 +1,78 @@ +package zipkintracer + +import ( + "bytes" + "fmt" + "runtime" + "strconv" + "sync" +) + +const debugGoroutineIDTag = "_initial_goroutine" + +type errAssertionFailed struct { + span *spanImpl + msg string +} + +// Error implements the error interface. +func (err *errAssertionFailed) Error() string { + return fmt.Sprintf("%s:\n%+v", err.msg, err.span) +} + +func (s *spanImpl) Lock() { + s.Mutex.Lock() + s.maybeAssertSanityLocked() +} + +func (s *spanImpl) maybeAssertSanityLocked() { + if s.tracer == nil { + s.Mutex.Unlock() + panic(&errAssertionFailed{span: s, msg: "span used after call to Finish()"}) + } + if s.tracer.options.debugAssertSingleGoroutine { + startID := curGoroutineID() + curID, ok := s.raw.Tags[debugGoroutineIDTag].(uint64) + if !ok { + // This is likely invoked in the context of the SetTag which sets + // debugGoroutineTag. + return + } + if startID != curID { + s.Mutex.Unlock() + panic(&errAssertionFailed{ + span: s, + msg: fmt.Sprintf("span started on goroutine %d, but now running on %d", startID, curID), + }) + } + } +} + +var goroutineSpace = []byte("goroutine ") +var littleBuf = sync.Pool{ + New: func() interface{} { + buf := make([]byte, 64) + return &buf + }, +} + +// Credit to @bradfitz: +// https://github.com/golang/net/blob/master/http2/gotrack.go#L51 +func curGoroutineID() uint64 { + bp := littleBuf.Get().(*[]byte) + defer littleBuf.Put(bp) + b := *bp + b = b[:runtime.Stack(b, false)] + // Parse the 4707 out of "goroutine 4707 [" + b = bytes.TrimPrefix(b, goroutineSpace) + i := bytes.IndexByte(b, ' ') + if i < 0 { + panic(fmt.Sprintf("No space found in %q", b)) + } + b = b[:i] + n, err := strconv.ParseUint(string(b), 10, 64) + if err != nil { + panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err)) + } + return n +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go new file mode 100644 index 0000000..31b6a00 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go @@ -0,0 +1,62 @@ +package zipkintracer + +import "github.com/opentracing/opentracing-go" + +// A SpanEvent is emitted when a mutating command is called on a Span. +type SpanEvent interface{} + +// EventCreate is emitted when a Span is created. +type EventCreate struct{ OperationName string } + +// EventTag is received when SetTag is called. +type EventTag struct { + Key string + Value interface{} +} + +// EventBaggage is received when SetBaggageItem is called. +type EventBaggage struct { + Key, Value string +} + +// EventLogFields is received when LogFields or LogKV is called. 
+type EventLogFields opentracing.LogRecord + +// EventLog is received when Log (or one of its derivatives) is called. +// +// DEPRECATED +type EventLog opentracing.LogData + +// EventFinish is received when Finish is called. +type EventFinish RawSpan + +func (s *spanImpl) onCreate(opName string) { + if s.event != nil { + s.event(EventCreate{OperationName: opName}) + } +} +func (s *spanImpl) onTag(key string, value interface{}) { + if s.event != nil { + s.event(EventTag{Key: key, Value: value}) + } +} +func (s *spanImpl) onLog(ld opentracing.LogData) { + if s.event != nil { + s.event(EventLog(ld)) + } +} +func (s *spanImpl) onLogFields(lr opentracing.LogRecord) { + if s.event != nil { + s.event(EventLogFields(lr)) + } +} +func (s *spanImpl) onBaggage(key, value string) { + if s.event != nil { + s.event(EventBaggage{Key: key, Value: value}) + } +} +func (s *spanImpl) onFinish(sp RawSpan) { + if s.event != nil { + s.event(EventFinish(sp)) + } +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go new file mode 100644 index 0000000..05cb10e --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go @@ -0,0 +1,39 @@ +package flag + +// Flags provides the ability to create and communicate feature flags. +type Flags uint64 + +// Flags is a bitset +const ( + Debug Flags = 1 << 0 + + // All flags below deal with binaryPropagators. They will be discarded in the + // textMapPropagator (not read and not set) + + // SamplingSet and Sampled handle Sampled tribool logic for interop with + // instrumenting libraries / propagation channels not using a separate Sampled + // header and potentially encoding this in flags. + // + // When we receive a flag we do this: + // 1. Sampled bit is set => true + // 2. Sampled bit is not set => inspect SamplingSet bit. + // 2a. SamplingSet bit is set => false + // 2b. SamplingSet bit is not set => null + // Note on 2b.: depending on the propagator having a separate Sampled header + // we either assume Sampling is false or unknown. In the latter case we will + // run our sampler even though we are not the root of the trace. + // + // When propagating to a downstream service we will always be explicit and + // will provide a set SamplingSet bit in case of our binary propagator either + SamplingSet Flags = 1 << 1 + Sampled Flags = 1 << 2 + // When set, we can ignore the value of the parentId. This is used for binary + // fixed width transports or transports like proto3 that return a default + // value if a value has not been set (thus not enabling you to distinguish + // between the value being set to the default or not set at all). + // + // While many zipkin systems re-use a trace id as the root span id, we know + // that some don't. With this flag, we can tell for sure if the span is root + // as opposed to the convention trace id == span id == parent id. + IsRoot Flags = 1 << 3 +) diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go new file mode 100644 index 0000000..f5695e0 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go @@ -0,0 +1,113 @@ +// Copyright (c) 2016 Uber Technologies, Inc. 
+// Copyright (c) 2016 Bas van Beek + +// Permission is hereby granted, free of charge, to any person obtaining a copy +// of this software and associated documentation files (the "Software"), to deal +// in the Software without restriction, including without limitation the rights +// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +// copies of the Software, and to permit persons to whom the Software is +// furnished to do so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in +// all copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +// THE SOFTWARE. + +package zipkintracer + +import ( + "bytes" + "encoding/json" + "errors" + "fmt" + + "github.com/go-logfmt/logfmt" + "github.com/opentracing/opentracing-go/log" +) + +var errEventLogNotFound = errors.New("event log field not found") + +type fieldsAsMap map[string]string + +// MaterializeWithJSON converts log Fields into JSON string +func MaterializeWithJSON(logFields []log.Field) ([]byte, error) { + fields := fieldsAsMap(make(map[string]string, len(logFields))) + for _, field := range logFields { + field.Marshal(fields) + } + return json.Marshal(fields) +} + +// MaterializeWithLogFmt converts log Fields into LogFmt string +func MaterializeWithLogFmt(logFields []log.Field) ([]byte, error) { + var ( + buffer = bytes.NewBuffer(nil) + encoder = logfmt.NewEncoder(buffer) + ) + for _, field := range logFields { + if err := encoder.EncodeKeyval(field.Key(), field.Value()); err != nil { + encoder.EncodeKeyval(field.Key(), err.Error()) + } + } + return buffer.Bytes(), nil +} + +// StrictZipkinMaterializer will only record a log.Field of type "event". 
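+//
+// For example (field values illustrative), this returns the bytes of
+// "cache.miss" and drops the non-event field:
+//
+//     data, _ := StrictZipkinMaterializer([]log.Field{
+//         log.String("event", "cache.miss"),
+//         log.Int("attempt", 2),
+//     })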
+func StrictZipkinMaterializer(logFields []log.Field) ([]byte, error) { + for _, field := range logFields { + if field.Key() == "event" { + return []byte(fmt.Sprintf("%+v", field.Value())), nil + } + } + return nil, errEventLogNotFound +} + +func (ml fieldsAsMap) EmitString(key, value string) { + ml[key] = value +} + +func (ml fieldsAsMap) EmitBool(key string, value bool) { + ml[key] = fmt.Sprintf("%t", value) +} + +func (ml fieldsAsMap) EmitInt(key string, value int) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitInt32(key string, value int32) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitInt64(key string, value int64) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitUint32(key string, value uint32) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitUint64(key string, value uint64) { + ml[key] = fmt.Sprintf("%d", value) +} + +func (ml fieldsAsMap) EmitFloat32(key string, value float32) { + ml[key] = fmt.Sprintf("%f", value) +} + +func (ml fieldsAsMap) EmitFloat64(key string, value float64) { + ml[key] = fmt.Sprintf("%f", value) +} + +func (ml fieldsAsMap) EmitObject(key string, value interface{}) { + ml[key] = fmt.Sprintf("%+v", value) +} + +func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) { + value(ml) +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go new file mode 100644 index 0000000..643f653 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go @@ -0,0 +1,64 @@ +package zipkintracer + +import ( + "errors" + "fmt" + "log" + "strings" +) + +// ErrMissingValue adds a Missing Value Error when the Logging Parameters are +// not even in number +var ErrMissingValue = errors.New("(MISSING)") + +// Logger is the fundamental interface for all log operations. Log creates a +// log event from keyvals, a variadic sequence of alternating keys and values. +// The signature is compatible with the Go kit log package. +type Logger interface { + Log(keyvals ...interface{}) error +} + +// NewNopLogger provides a Logger that discards all Log data sent to it. +func NewNopLogger() Logger { + return &nopLogger{} +} + +// LogWrapper wraps a standard library logger into a Logger compatible with this +// package. +func LogWrapper(l *log.Logger) Logger { + return &wrappedLogger{l: l} +} + +// wrappedLogger implements Logger +type wrappedLogger struct { + l *log.Logger +} + +// Log implements Logger +func (l *wrappedLogger) Log(k ...interface{}) error { + if len(k)%2 == 1 { + k = append(k, ErrMissingValue) + } + o := make([]string, len(k)/2) + for i := 0; i < len(k); i += 2 { + o[i/2] = fmt.Sprintf("%s=%q", k[i], k[i+1]) + } + l.l.Println(strings.Join(o, " ")) + return nil +} + +// nopLogger implements Logger +type nopLogger struct{} + +// Log implements Logger +func (*nopLogger) Log(_ ...interface{}) error { return nil } + +// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If +// f is a function with the appropriate signature, LoggerFunc(f) is a Logger +// object that calls f. +type LoggerFunc func(...interface{}) error + +// Log implements Logger by calling f(keyvals...). +func (f LoggerFunc) Log(keyvals ...interface{}) error { + return f(keyvals...) 
+} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go new file mode 100644 index 0000000..f46ff01 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/observer.go @@ -0,0 +1,52 @@ +package zipkintracer + +import ( + otobserver "github.com/opentracing-contrib/go-observer" + opentracing "github.com/opentracing/opentracing-go" +) + +// observer is a dispatcher to other observers +type observer struct { + observers []otobserver.Observer +} + +// spanObserver is a dispatcher to other span observers +type spanObserver struct { + observers []otobserver.SpanObserver +} + +func (o observer) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (otobserver.SpanObserver, bool) { + var spanObservers []otobserver.SpanObserver + for _, obs := range o.observers { + spanObs, ok := obs.OnStartSpan(sp, operationName, options) + if ok { + if spanObservers == nil { + spanObservers = make([]otobserver.SpanObserver, 0, len(o.observers)) + } + spanObservers = append(spanObservers, spanObs) + } + } + if len(spanObservers) == 0 { + return nil, false + } + + return spanObserver{observers: spanObservers}, true +} + +func (o spanObserver) OnSetOperationName(operationName string) { + for _, obs := range o.observers { + obs.OnSetOperationName(operationName) + } +} + +func (o spanObserver) OnSetTag(key string, value interface{}) { + for _, obs := range o.observers { + obs.OnSetTag(key, value) + } +} + +func (o spanObserver) OnFinish(options opentracing.FinishOptions) { + for _, obs := range o.observers { + obs.OnFinish(options) + } +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go new file mode 100644 index 0000000..56d2d5a --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go @@ -0,0 +1,68 @@ +package zipkintracer + +import ( + opentracing "github.com/opentracing/opentracing-go" + + "github.com/openzipkin/zipkin-go-opentracing/flag" + "github.com/openzipkin/zipkin-go-opentracing/types" +) + +type accessorPropagator struct { + tracer *tracerImpl +} + +// DelegatingCarrier is a flexible carrier interface which can be implemented +// by types which have a means of storing the trace metadata and already know +// how to serialize themselves (for example, protocol buffers). 
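+//
+// A sketch of the intended use (assuming this package exposes a Delegator
+// format value for carrier-accessor propagation, as in basictracer-go;
+// error handling elided): a message struct that stores trace state in its
+// own fields can implement this interface and be passed straight to
+// Tracer.Inject and Tracer.Extract:
+//
+//     _ = tracer.Inject(span.Context(), Delegator, myMessage)
+//     sc, _ := tracer.Extract(Delegator, myMessage)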
+type DelegatingCarrier interface { + SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) + State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) + SetBaggageItem(key, value string) + GetBaggage(func(key, value string)) +} + +func (p *accessorPropagator) Inject( + spanContext opentracing.SpanContext, + carrier interface{}, +) error { + dc, ok := carrier.(DelegatingCarrier) + if !ok || dc == nil { + return opentracing.ErrInvalidCarrier + } + sc, ok := spanContext.(SpanContext) + if !ok { + return opentracing.ErrInvalidSpanContext + } + dc.SetState(sc.TraceID, sc.SpanID, sc.ParentSpanID, sc.Sampled, sc.Flags) + for k, v := range sc.Baggage { + dc.SetBaggageItem(k, v) + } + return nil +} + +func (p *accessorPropagator) Extract( + carrier interface{}, +) (opentracing.SpanContext, error) { + dc, ok := carrier.(DelegatingCarrier) + if !ok || dc == nil { + return nil, opentracing.ErrInvalidCarrier + } + + traceID, spanID, parentSpanID, sampled, flags := dc.State() + sc := SpanContext{ + TraceID: traceID, + SpanID: spanID, + Sampled: sampled, + Baggage: nil, + ParentSpanID: parentSpanID, + Flags: flags, + } + dc.GetBaggage(func(k, v string) { + if sc.Baggage == nil { + sc.Baggage = map[string]string{} + } + sc.Baggage[k] = v + }) + + return sc, nil +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go new file mode 100644 index 0000000..2142d5e --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go @@ -0,0 +1,252 @@ +package zipkintracer + +import ( + "encoding/binary" + "io" + "strconv" + "strings" + + "github.com/gogo/protobuf/proto" + opentracing "github.com/opentracing/opentracing-go" + + "github.com/openzipkin/zipkin-go-opentracing/flag" + "github.com/openzipkin/zipkin-go-opentracing/types" + "github.com/openzipkin/zipkin-go-opentracing/wire" +) + +type textMapPropagator struct { + tracer *tracerImpl +} +type binaryPropagator struct { + tracer *tracerImpl +} + +const ( + prefixTracerState = "x-b3-" // we default to interop with non-opentracing zipkin tracers + prefixBaggage = "ot-baggage-" + + tracerStateFieldCount = 3 // not 5, X-B3-ParentSpanId is optional and we allow optional Sampled header + zipkinTraceID = prefixTracerState + "traceid" + zipkinSpanID = prefixTracerState + "spanid" + zipkinParentSpanID = prefixTracerState + "parentspanid" + zipkinSampled = prefixTracerState + "sampled" + zipkinFlags = prefixTracerState + "flags" +) + +func (p *textMapPropagator) Inject( + spanContext opentracing.SpanContext, + opaqueCarrier interface{}, +) error { + sc, ok := spanContext.(SpanContext) + if !ok { + return opentracing.ErrInvalidSpanContext + } + carrier, ok := opaqueCarrier.(opentracing.TextMapWriter) + if !ok { + return opentracing.ErrInvalidCarrier + } + carrier.Set(zipkinTraceID, sc.TraceID.ToHex()) + carrier.Set(zipkinSpanID, strconv.FormatUint(sc.SpanID, 16)) + carrier.Set(zipkinSampled, strconv.FormatBool(sc.Sampled)) + + if sc.ParentSpanID != nil { + // we only set ParentSpanID header if there is a parent span + carrier.Set(zipkinParentSpanID, strconv.FormatUint(*sc.ParentSpanID, 16)) + } + // we only need to inject the debug flag if set. see flag package for details. 
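+	// (The SamplingSet and Sampled bits are intentionally dropped here: this
+	// textmap format uses the dedicated x-b3-sampled header instead, so only
+	// the Debug bit survives the masking below.)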
+ flags := sc.Flags & flag.Debug + carrier.Set(zipkinFlags, strconv.FormatUint(uint64(flags), 10)) + + for k, v := range sc.Baggage { + carrier.Set(prefixBaggage+k, v) + } + return nil +} + +func (p *textMapPropagator) Extract( + opaqueCarrier interface{}, +) (opentracing.SpanContext, error) { + carrier, ok := opaqueCarrier.(opentracing.TextMapReader) + if !ok { + return nil, opentracing.ErrInvalidCarrier + } + requiredFieldCount := 0 + var ( + traceID types.TraceID + spanID uint64 + sampled bool + parentSpanID *uint64 + flags flag.Flags + err error + ) + decodedBaggage := make(map[string]string) + err = carrier.ForeachKey(func(k, v string) error { + switch strings.ToLower(k) { + case zipkinTraceID: + traceID, err = types.TraceIDFromHex(v) + if err != nil { + return opentracing.ErrSpanContextCorrupted + } + case zipkinSpanID: + spanID, err = strconv.ParseUint(v, 16, 64) + if err != nil { + return opentracing.ErrSpanContextCorrupted + } + case zipkinParentSpanID: + var id uint64 + id, err = strconv.ParseUint(v, 16, 64) + if err != nil { + return opentracing.ErrSpanContextCorrupted + } + parentSpanID = &id + case zipkinSampled: + sampled, err = strconv.ParseBool(v) + if err != nil { + return opentracing.ErrSpanContextCorrupted + } + // Sampled header was explicitly set + flags |= flag.SamplingSet + case zipkinFlags: + var f uint64 + f, err = strconv.ParseUint(v, 10, 64) + if err != nil { + return opentracing.ErrSpanContextCorrupted + } + if flag.Flags(f)&flag.Debug == flag.Debug { + flags |= flag.Debug + } + default: + lowercaseK := strings.ToLower(k) + if strings.HasPrefix(lowercaseK, prefixBaggage) { + decodedBaggage[strings.TrimPrefix(lowercaseK, prefixBaggage)] = v + } + // Balance off the requiredFieldCount++ just below... + requiredFieldCount-- + } + requiredFieldCount++ + return nil + }) + if err != nil { + return nil, err + } + if requiredFieldCount < tracerStateFieldCount { + if requiredFieldCount == 0 { + return nil, opentracing.ErrSpanContextNotFound + } + return nil, opentracing.ErrSpanContextCorrupted + } + + // check if Sample state was communicated through the Flags bitset + if !sampled && flags&flag.Sampled == flag.Sampled { + sampled = true + } + + return SpanContext{ + TraceID: traceID, + SpanID: spanID, + Sampled: sampled, + Baggage: decodedBaggage, + ParentSpanID: parentSpanID, + Flags: flags, + }, nil +} + +func (p *binaryPropagator) Inject( + spanContext opentracing.SpanContext, + opaqueCarrier interface{}, +) error { + sc, ok := spanContext.(SpanContext) + if !ok { + return opentracing.ErrInvalidSpanContext + } + carrier, ok := opaqueCarrier.(io.Writer) + if !ok { + return opentracing.ErrInvalidCarrier + } + + state := wire.TracerState{} + state.TraceId = sc.TraceID.Low + state.TraceIdHigh = sc.TraceID.High + state.SpanId = sc.SpanID + state.Sampled = sc.Sampled + state.BaggageItems = sc.Baggage + + // encode the debug bit + flags := sc.Flags & flag.Debug + if sc.ParentSpanID != nil { + state.ParentSpanId = *sc.ParentSpanID + } else { + // root span... + state.ParentSpanId = 0 + flags |= flag.IsRoot + } + + // we explicitly inform our sampling state downstream + flags |= flag.SamplingSet + if sc.Sampled { + flags |= flag.Sampled + } + state.Flags = uint64(flags) + + b, err := proto.Marshal(&state) + if err != nil { + return err + } + + // Write the length of the marshalled binary to the writer. 
+	length := uint32(len(b))
+	if err = binary.Write(carrier, binary.BigEndian, &length); err != nil {
+		return err
+	}
+
+	_, err = carrier.Write(b)
+	return err
+}
+
+func (p *binaryPropagator) Extract(
+	opaqueCarrier interface{},
+) (opentracing.SpanContext, error) {
+	carrier, ok := opaqueCarrier.(io.Reader)
+	if !ok {
+		return nil, opentracing.ErrInvalidCarrier
+	}
+
+	// Read the length of the marshalled binary. io.ReadAll isn't that performant
+	// since it keeps resizing the underlying buffer as it encounters more bytes
+	// to read. By reading the length, we can allocate a fixed sized buf and read
+	// the exact amount of bytes into it.
+	var length uint32
+	if err := binary.Read(carrier, binary.BigEndian, &length); err != nil {
+		return nil, opentracing.ErrSpanContextCorrupted
+	}
+	buf := make([]byte, length)
+	if n, err := carrier.Read(buf); err != nil {
+		if n > 0 {
+			return nil, opentracing.ErrSpanContextCorrupted
+		}
+		return nil, opentracing.ErrSpanContextNotFound
+	}
+
+	ctx := wire.TracerState{}
+	if err := proto.Unmarshal(buf, &ctx); err != nil {
+		return nil, opentracing.ErrSpanContextCorrupted
+	}
+
+	flags := flag.Flags(ctx.Flags)
+	if flags&flag.Sampled == flag.Sampled {
+		ctx.Sampled = true
+	}
+	// this propagator expects sampling state to be explicitly propagated by the
+	// upstream service, so set this flag to identify to the tracer that it
+	// should not run its sampler in case it is not the root of the trace.
+	flags |= flag.SamplingSet
+
+	return SpanContext{
+		TraceID:      types.TraceID{Low: ctx.TraceId, High: ctx.TraceIdHigh},
+		SpanID:       ctx.SpanId,
+		Sampled:      ctx.Sampled,
+		Baggage:      ctx.BaggageItems,
+		ParentSpanID: &ctx.ParentSpanId,
+		Flags:        flags,
+	}, nil
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go
new file mode 100644
index 0000000..03bc15b
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go
@@ -0,0 +1,30 @@
+package zipkintracer
+
+import (
+	"time"
+
+	opentracing "github.com/opentracing/opentracing-go"
+)
+
+// RawSpan encapsulates all state associated with a (finished) Span.
+type RawSpan struct {
+	// Those recording the RawSpan should also record the contents of its
+	// SpanContext.
+	Context SpanContext
+
+	// The name of the "operation" this span is an instance of. (Called a "span
+	// name" in some implementations)
+	Operation string
+
+	// We store the start timestamp and duration rather than the start and end
+	// timestamps, so that only one of the timestamps has global clock
+	// uncertainty issues.
+	Start    time.Time
+	Duration time.Duration
+
+	// Essentially an extension mechanism. Can be used for many purposes,
+	// not to be enumerated here.
+	Tags opentracing.Tags
+
+	// The span's "microlog".
+	Logs []opentracing.LogRecord
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go
new file mode 100644
index 0000000..0b8eeb7
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go
@@ -0,0 +1,60 @@
+package zipkintracer
+
+import "sync"
+
+// A SpanRecorder handles all of the `RawSpan` data generated via an
+// associated `Tracer` (see `NewStandardTracer`) instance. It also names
+// the containing process and provides access to a straightforward tag map.
+type SpanRecorder interface {
+	// Implementations must determine whether and where to store `span`.
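+	//
+	// A hedged sketch (illustrative; toZipkinSpan is hypothetical): a
+	// forwarding implementation would convert the RawSpan and hand it to a
+	// Collector:
+	//
+	//     func (r zipkinRecorder) RecordSpan(s RawSpan) {
+	//         if s.Context.Sampled {
+	//             _ = r.collector.Collect(toZipkinSpan(s))
+	//         }
+	//     }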
+	RecordSpan(span RawSpan)
+}
+
+// InMemorySpanRecorder is a simple thread-safe implementation of
+// SpanRecorder that stores all reported spans in memory, accessible
+// via reporter.GetSpans(). It is primarily intended for testing purposes.
+type InMemorySpanRecorder struct {
+	sync.RWMutex
+	spans []RawSpan
+}
+
+// NewInMemoryRecorder creates a new InMemorySpanRecorder.
+func NewInMemoryRecorder() *InMemorySpanRecorder {
+	return new(InMemorySpanRecorder)
+}
+
+// RecordSpan implements the respective method of SpanRecorder.
+func (r *InMemorySpanRecorder) RecordSpan(span RawSpan) {
+	r.Lock()
+	defer r.Unlock()
+	r.spans = append(r.spans, span)
+}
+
+// GetSpans returns a copy of the array of spans accumulated so far.
+func (r *InMemorySpanRecorder) GetSpans() []RawSpan {
+	r.RLock()
+	defer r.RUnlock()
+	spans := make([]RawSpan, len(r.spans))
+	copy(spans, r.spans)
+	return spans
+}
+
+// GetSampledSpans returns a slice of spans accumulated so far which were sampled.
+func (r *InMemorySpanRecorder) GetSampledSpans() []RawSpan {
+	r.RLock()
+	defer r.RUnlock()
+	spans := make([]RawSpan, 0, len(r.spans))
+	for _, span := range r.spans {
+		if span.Context.Sampled {
+			spans = append(spans, span)
+		}
+	}
+	return spans
+}
+
+// Reset clears the internal array of spans.
+func (r *InMemorySpanRecorder) Reset() {
+	r.Lock()
+	defer r.Unlock()
+	r.spans = nil
+}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go
new file mode 100644
index 0000000..bb7ff0a
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go
@@ -0,0 +1,99 @@
+package zipkintracer
+
+import (
+	"math"
+	"math/rand"
+	"sync"
+	"time"
+)
+
+// Sampler functions report whether a Zipkin span should be sampled, based on
+// its traceID.
+type Sampler func(id uint64) bool
+
+func neverSample(_ uint64) bool { return false }
+
+func alwaysSample(_ uint64) bool { return true }
+
+// ModuloSampler provides a typical OpenTracing type Sampler.
+func ModuloSampler(mod uint64) Sampler {
+	if mod < 2 {
+		return alwaysSample
+	}
+	return func(id uint64) bool {
+		return (id % mod) == 0
+	}
+}
+
+// NewBoundarySampler is appropriate for high-traffic instrumentation that
+// provisions random trace ids and makes the sampling decision only once.
+// It defends against nodes in the cluster selecting exactly the same ids.
+func NewBoundarySampler(rate float64, salt int64) Sampler {
+	if rate <= 0 {
+		return neverSample
+	}
+	if rate >= 1.0 {
+		return alwaysSample
+	}
+	var (
+		boundary = int64(rate * 10000)
+		usalt    = uint64(salt)
+	)
+	return func(id uint64) bool {
+		return int64(math.Abs(float64(id^usalt)))%10000 < boundary
+	}
+}
+
+// NewCountingSampler is appropriate for low-traffic instrumentation or
+// those who do not provision random trace ids. It is not appropriate for
+// collectors as the sampling decision isn't idempotent (consistent based
+// on trace id).
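+//
+// Example usage (a sketch): keep roughly half of all traces, regardless of
+// trace id:
+//
+//     sampler := NewCountingSampler(0.5)
+//     keep := sampler(someTraceID) // true on ~50 of every 100 calls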
+func NewCountingSampler(rate float64) Sampler { + if rate <= 0 { + return neverSample + } + if rate >= 1.0 { + return alwaysSample + } + var ( + i = 0 + outOf100 = int(rate*100 + math.Copysign(0.5, rate*100)) // for rounding float to int conversion instead of truncation + decisions = randomBitSet(100, outOf100, rand.New(rand.NewSource(time.Now().UnixNano()))) + mtx = &sync.Mutex{} + ) + + return func(_ uint64) bool { + mtx.Lock() + defer mtx.Unlock() + result := decisions[i] + i++ + if i == 100 { + i = 0 + } + return result + } +} + +/** + * Reservoir sampling algorithm borrowed from Stack Overflow. + * + * http://stackoverflow.com/questions/12817946/generate-a-random-bitset-with-n-1s + */ +func randomBitSet(size int, cardinality int, rnd *rand.Rand) []bool { + result := make([]bool, size) + chosen := make([]int, cardinality) + var i int + for i = 0; i < cardinality; i++ { + chosen[i] = i + result[i] = true + } + for ; i < size; i++ { + j := rnd.Intn(i + 1) + if j < cardinality { + result[chosen[j]] = false + result[i] = true + chosen[j] = i + } + } + return result +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go new file mode 100644 index 0000000..4850a94 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go @@ -0,0 +1,290 @@ +package zipkintracer + +import ( + "sync" + "time" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + + otobserver "github.com/opentracing-contrib/go-observer" + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" +) + +// Span provides access to the essential details of the span, for use +// by zipkintracer consumers. These methods may only be called prior +// to (*opentracing.Span).Finish(). +type Span interface { + opentracing.Span + + // Operation names the work done by this span instance + Operation() string + + // Start indicates when the span began + Start() time.Time +} + +// Implements the `Span` interface. Created via tracerImpl (see +// `zipkintracer.NewTracer()`). +type spanImpl struct { + tracer *tracerImpl + event func(SpanEvent) + observer otobserver.SpanObserver + sync.Mutex // protects the fields below + raw RawSpan + // The number of logs dropped because of MaxLogsPerSpan. + numDroppedLogs int + Endpoint *zipkincore.Endpoint +} + +var spanPool = &sync.Pool{New: func() interface{} { + return &spanImpl{} +}} + +func (s *spanImpl) reset() { + s.tracer, s.event = nil, nil + // Note: Would like to do the following, but then the consumer of RawSpan + // (the recorder) needs to make sure that they're not holding on to the + // baggage or logs when they return (i.e. they need to copy if they care): + // + // logs, baggage := s.raw.Logs[:0], s.raw.Baggage + // for k := range baggage { + // delete(baggage, k) + // } + // s.raw.Logs, s.raw.Baggage = logs, baggage + // + // That's likely too much to ask for. But there is some magic we should + // be able to do with `runtime.SetFinalizer` to reclaim that memory into + // a buffer pool when GC considers them unreachable, which should ease + // some of the load. Hard to say how quickly that would be in practice + // though. 
+ s.raw = RawSpan{ + Context: SpanContext{}, + } +} + +func (s *spanImpl) SetOperationName(operationName string) opentracing.Span { + if s.observer != nil { + s.observer.OnSetOperationName(operationName) + } + s.Lock() + defer s.Unlock() + s.raw.Operation = operationName + return s +} + +func (s *spanImpl) trim() bool { + return !s.raw.Context.Sampled && s.tracer.options.trimUnsampledSpans +} + +func (s *spanImpl) SetTag(key string, value interface{}) opentracing.Span { + defer s.onTag(key, value) + if s.observer != nil { + s.observer.OnSetTag(key, value) + } + + s.Lock() + defer s.Unlock() + if key == string(ext.SamplingPriority) { + if v, ok := value.(uint16); ok { + s.raw.Context.Sampled = v != 0 + return s + } + } + if s.trim() { + return s + } + + if s.raw.Tags == nil { + s.raw.Tags = opentracing.Tags{} + } + s.raw.Tags[key] = value + return s +} + +func (s *spanImpl) LogKV(keyValues ...interface{}) { + fields, err := log.InterleavedKVToFields(keyValues...) + if err != nil { + s.LogFields(log.Error(err), log.String("function", "LogKV")) + return + } + s.LogFields(fields...) +} + +func (s *spanImpl) appendLog(lr opentracing.LogRecord) { + maxLogs := s.tracer.options.maxLogsPerSpan + if maxLogs == 0 || len(s.raw.Logs) < maxLogs { + s.raw.Logs = append(s.raw.Logs, lr) + return + } + + // We have too many logs. We don't touch the first numOld logs; we treat the + // rest as a circular buffer and overwrite the oldest log among those. + numOld := (maxLogs - 1) / 2 + numNew := maxLogs - numOld + s.raw.Logs[numOld+s.numDroppedLogs%numNew] = lr + s.numDroppedLogs++ +} + +func (s *spanImpl) LogFields(fields ...log.Field) { + lr := opentracing.LogRecord{ + Fields: fields, + } + defer s.onLogFields(lr) + s.Lock() + defer s.Unlock() + if s.trim() || s.tracer.options.dropAllLogs { + return + } + if lr.Timestamp.IsZero() { + lr.Timestamp = time.Now() + } + s.appendLog(lr) +} + +func (s *spanImpl) LogEvent(event string) { + s.Log(opentracing.LogData{ + Event: event, + }) +} + +func (s *spanImpl) LogEventWithPayload(event string, payload interface{}) { + s.Log(opentracing.LogData{ + Event: event, + Payload: payload, + }) +} + +func (s *spanImpl) Log(ld opentracing.LogData) { + defer s.onLog(ld) + s.Lock() + defer s.Unlock() + if s.trim() || s.tracer.options.dropAllLogs { + return + } + + if ld.Timestamp.IsZero() { + ld.Timestamp = time.Now() + } + + s.appendLog(ld.ToLogRecord()) +} + +func (s *spanImpl) Finish() { + s.FinishWithOptions(opentracing.FinishOptions{}) +} + +// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at +// the end (i.e. pos circular left shifts). 
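+//
+// For example, rotating [a b c d e] with pos=2 yields [c d e a b].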
+func rotateLogBuffer(buf []opentracing.LogRecord, pos int) { + // This algorithm is described in: + // http://www.cplusplus.com/reference/algorithm/rotate + for first, middle, next := 0, pos, pos; first != middle; { + buf[first], buf[next] = buf[next], buf[first] + first++ + next++ + if next == len(buf) { + next = middle + } else if first == middle { + middle = next + } + } +} + +func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) { + finishTime := opts.FinishTime + if finishTime.IsZero() { + finishTime = time.Now() + } + duration := finishTime.Sub(s.raw.Start) + + if s.observer != nil { + s.observer.OnFinish(opts) + } + + s.Lock() + defer s.Unlock() + + for _, lr := range opts.LogRecords { + s.appendLog(lr) + } + for _, ld := range opts.BulkLogData { + s.appendLog(ld.ToLogRecord()) + } + + if s.numDroppedLogs > 0 { + // We dropped some log events, which means that we used part of Logs as a + // circular buffer (see appendLog). De-circularize it. + numOld := (len(s.raw.Logs) - 1) / 2 + numNew := len(s.raw.Logs) - numOld + rotateLogBuffer(s.raw.Logs[numOld:], s.numDroppedLogs%numNew) + + // Replace the log in the middle (the oldest "new" log) with information + // about the dropped logs. This means that we are effectively dropping one + // more "new" log. + numDropped := s.numDroppedLogs + 1 + s.raw.Logs[numOld] = opentracing.LogRecord{ + // Keep the timestamp of the last dropped event. + Timestamp: s.raw.Logs[numOld].Timestamp, + Fields: []log.Field{ + log.String("event", "dropped Span logs"), + log.Int("dropped_log_count", numDropped), + log.String("component", "zipkintracer"), + }, + } + } + + s.raw.Duration = duration + + s.onFinish(s.raw) + s.tracer.options.recorder.RecordSpan(s.raw) + + // Last chance to get options before the span is possibly reset. + poolEnabled := s.tracer.options.enableSpanPool + if s.tracer.options.debugAssertUseAfterFinish { + // This makes it much more likely to catch a panic on any subsequent + // operation since s.tracer is accessed on every call to `Lock`. + // We don't call `reset()` here to preserve the logs in the Span + // which are printed when the assertion triggers. 
+ s.tracer = nil + } + + if poolEnabled { + spanPool.Put(s) + } +} + +func (s *spanImpl) Tracer() opentracing.Tracer { + return s.tracer +} + +func (s *spanImpl) Context() opentracing.SpanContext { + return s.raw.Context +} + +func (s *spanImpl) SetBaggageItem(key, val string) opentracing.Span { + s.onBaggage(key, val) + if s.trim() { + return s + } + + s.Lock() + defer s.Unlock() + s.raw.Context = s.raw.Context.WithBaggageItem(key, val) + return s +} + +func (s *spanImpl) BaggageItem(key string) string { + s.Lock() + defer s.Unlock() + return s.raw.Context.Baggage[key] +} + +func (s *spanImpl) Operation() string { + return s.raw.Operation +} + +func (s *spanImpl) Start() time.Time { + return s.raw.Start +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go new file mode 100644 index 0000000..9b51d87 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/GoUnusedProtection__.go @@ -0,0 +1,7 @@ +// Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package scribe + +var GoUnusedProtection__ int; + diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go new file mode 100644 index 0000000..be5fbae --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe-consts.go @@ -0,0 +1,24 @@ +// Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package scribe + +import ( + "bytes" + "reflect" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + + +func init() { +} + diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go new file mode 100644 index 0000000..3780172 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe/scribe.go @@ -0,0 +1,617 @@ +// Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package scribe + +import ( + "bytes" + "reflect" + "database/sql/driver" + "errors" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) 
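+// (Each assignment below references one import through the blank identifier,
+// which satisfies Go's unused-import check even when the generated code ends
+// up using none of that package's exported symbols.)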
+var _ = thrift.ZERO
+var _ = fmt.Printf
+var _ = context.Background
+var _ = reflect.DeepEqual
+var _ = bytes.Equal
+
+type ResultCode int64
+const (
+  ResultCode_OK ResultCode = 0
+  ResultCode_TRY_LATER ResultCode = 1
+)
+
+func (p ResultCode) String() string {
+  switch p {
+  case ResultCode_OK: return "OK"
+  case ResultCode_TRY_LATER: return "TRY_LATER"
+  }
+  return "<UNSET>"
+}
+
+func ResultCodeFromString(s string) (ResultCode, error) {
+  switch s {
+  case "OK": return ResultCode_OK, nil
+  case "TRY_LATER": return ResultCode_TRY_LATER, nil
+  }
+  return ResultCode(0), fmt.Errorf("not a valid ResultCode string")
+}
+
+
+func ResultCodePtr(v ResultCode) *ResultCode { return &v }
+
+func (p ResultCode) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *ResultCode) UnmarshalText(text []byte) error {
+q, err := ResultCodeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *ResultCode) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = ResultCode(v)
+return nil
+}
+
+func (p * ResultCode) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Attributes:
+// - Category
+// - Message
+type LogEntry struct {
+  Category string `thrift:"category,1" db:"category" json:"category"`
+  Message string `thrift:"message,2" db:"message" json:"message"`
+}
+
+func NewLogEntry() *LogEntry {
+  return &LogEntry{}
+}
+
+
+func (p *LogEntry) GetCategory() string {
+  return p.Category
+}
+
+func (p *LogEntry) GetMessage() string {
+  return p.Message
+}
+func (p *LogEntry) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField1(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    case 2:
+      if fieldTypeId == thrift.STRING {
+        if err := p.ReadField2(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *LogEntry) ReadField1(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(); err != nil {
+    return thrift.PrependError("error reading field 1: ", err)
+} else {
+  p.Category = v
+}
+  return nil
+}
+
+func (p *LogEntry) ReadField2(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadString(); err != nil {
+    return thrift.PrependError("error reading field 2: ", err)
+} else {
+  p.Message = v
+}
+  return nil
+}
+
+func (p *LogEntry) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("LogEntry"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(oprot); err != nil { return err }
+    if err := p.writeField2(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *LogEntry) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("category", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:category: ", p), err) }
+  if err := oprot.WriteString(string(p.Category)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.category (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:category: ", p), err) }
+  return err
+}
+
+func (p *LogEntry) writeField2(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("message", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:message: ", p), err) }
+  if err := oprot.WriteString(string(p.Message)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.message (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:message: ", p), err) }
+  return err
+}
+
+func (p *LogEntry) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("LogEntry(%+v)", *p)
+}
+
+type Scribe interface {
+  // Parameters:
+  // - Messages
+  Log(ctx context.Context, messages []*LogEntry) (r ResultCode, err error)
+}
+
+type ScribeClient struct {
+  Transport thrift.TTransport
+  ProtocolFactory thrift.TProtocolFactory
+  InputProtocol thrift.TProtocol
+  OutputProtocol thrift.TProtocol
+  SeqId int32
+}
+
+func NewScribeClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ScribeClient {
+  return &ScribeClient{Transport: t,
+    ProtocolFactory: f,
+    InputProtocol: f.GetProtocol(t),
+    OutputProtocol: f.GetProtocol(t),
+    SeqId: 0,
+  }
+}
+
+func NewScribeClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ScribeClient {
+  return &ScribeClient{Transport: t,
+    ProtocolFactory: nil,
+    InputProtocol: iprot,
+    OutputProtocol: oprot,
+    SeqId: 0,
+  }
+}
+
+// Parameters:
+// - Messages
+func (p *ScribeClient) Log(ctx context.Context, messages []*LogEntry) (r ResultCode, err error) {
+  if err = p.sendLog(messages); err != nil { return }
+  return p.recvLog()
+}
+
+func (p *ScribeClient) sendLog(messages []*LogEntry)(err error) {
+  oprot := p.OutputProtocol
+  if oprot == nil {
+    oprot = p.ProtocolFactory.GetProtocol(p.Transport)
+    p.OutputProtocol = oprot
+  }
+  p.SeqId++
+  if err = oprot.WriteMessageBegin("Log", thrift.CALL, p.SeqId); err != nil {
+    return
+  }
+  args := ScribeLogArgs{
+    Messages : messages,
+  }
+  if err = args.Write(oprot); err != nil {
+    return
+  }
+  if err = oprot.WriteMessageEnd(); err != nil {
+    return
+  }
+  return oprot.Flush()
+}
+
+
+func (p *ScribeClient) recvLog() (value ResultCode, err error) {
+  iprot := p.InputProtocol
+  if iprot == nil {
+    iprot = p.ProtocolFactory.GetProtocol(p.Transport)
+    p.InputProtocol = iprot
+  }
+  method, mTypeId, seqId, err := iprot.ReadMessageBegin()
+  if err != nil {
+    return
+  }
+  if method != "Log" {
+    err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "Log failed: wrong method name")
+    return
+  }
+  if p.SeqId != seqId {
+    err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "Log failed: out of sequence response")
+    return
+  }
+  if
mTypeId == thrift.EXCEPTION { + error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception") + var error1 error + error1, err = error0.Read(iprot) + if err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + err = error1 + return + } + if mTypeId != thrift.REPLY { + err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "Log failed: invalid message type") + return + } + result := ScribeLogResult{} + if err = result.Read(iprot); err != nil { + return + } + if err = iprot.ReadMessageEnd(); err != nil { + return + } + value = result.GetSuccess() + return +} + + +type ScribeProcessor struct { + processorMap map[string]thrift.TProcessorFunction + handler Scribe +} + +func (p *ScribeProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { + p.processorMap[key] = processor +} + +func (p *ScribeProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { + processor, ok = p.processorMap[key] + return processor, ok +} + +func (p *ScribeProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { + return p.processorMap +} + +func NewScribeProcessor(handler Scribe) *ScribeProcessor { + + self2 := &ScribeProcessor{handler:handler, processorMap:make(map[string]thrift.TProcessorFunction)} + self2.processorMap["Log"] = &scribeProcessorLog{handler:handler} +return self2 +} + +func (p *ScribeProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + name, _, seqId, err := iprot.ReadMessageBegin() + if err != nil { return false, err } + if processor, ok := p.GetProcessorFunction(name); ok { + return processor.Process(ctx, seqId, iprot, oprot) + } + iprot.Skip(thrift.STRUCT) + iprot.ReadMessageEnd() + x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function " + name) + oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) + x3.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, x3 + +} + +type scribeProcessorLog struct { + handler Scribe +} + +func (p *scribeProcessorLog) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { + args := ScribeLogArgs{} + if err = args.Read(iprot); err != nil { + iprot.ReadMessageEnd() + x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) + oprot.WriteMessageBegin("Log", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return false, err + } + + iprot.ReadMessageEnd() + result := ScribeLogResult{} +var retval ResultCode + var err2 error + if retval, err2 = p.handler.Log(ctx, args.Messages); err2 != nil { + x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Log: " + err2.Error()) + oprot.WriteMessageBegin("Log", thrift.EXCEPTION, seqId) + x.Write(oprot) + oprot.WriteMessageEnd() + oprot.Flush() + return true, err2 + } else { + result.Success = &retval +} + if err2 = oprot.WriteMessageBegin("Log", thrift.REPLY, seqId); err2 != nil { + err = err2 + } + if err2 = result.Write(oprot); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { + err = err2 + } + if err2 = oprot.Flush(); err == nil && err2 != nil { + err = err2 + } + if err != nil { + return + } + return true, err +} + + +// HELPER FUNCTIONS AND STRUCTURES + +// Attributes: +// - Messages +type ScribeLogArgs struct { + Messages []*LogEntry `thrift:"messages,1" 
db:"messages" json:"messages"`
+}
+
+func NewScribeLogArgs() *ScribeLogArgs {
+  return &ScribeLogArgs{}
+}
+
+
+func (p *ScribeLogArgs) GetMessages() []*LogEntry {
+  return p.Messages
+}
+func (p *ScribeLogArgs) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 1:
+      if fieldTypeId == thrift.LIST {
+        if err := p.ReadField1(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *ScribeLogArgs) ReadField1(iprot thrift.TProtocol) error {
+  _, size, err := iprot.ReadListBegin()
+  if err != nil {
+    return thrift.PrependError("error reading list begin: ", err)
+  }
+  tSlice := make([]*LogEntry, 0, size)
+  p.Messages = tSlice
+  for i := 0; i < size; i ++ {
+    _elem4 := &LogEntry{}
+    if err := _elem4.Read(iprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
+    }
+    p.Messages = append(p.Messages, _elem4)
+  }
+  if err := iprot.ReadListEnd(); err != nil {
+    return thrift.PrependError("error reading list end: ", err)
+  }
+  return nil
+}
+
+func (p *ScribeLogArgs) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("Log_args"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField1(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ScribeLogArgs) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("messages", thrift.LIST, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:messages: ", p), err) }
+  if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Messages)); err != nil {
+    return thrift.PrependError("error writing list begin: ", err)
+  }
+  for _, v := range p.Messages {
+    if err := v.Write(oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
+    }
+  }
+  if err := oprot.WriteListEnd(); err != nil {
+    return thrift.PrependError("error writing list end: ", err)
+  }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:messages: ", p), err) }
+  return err
+}
+
+func (p *ScribeLogArgs) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ScribeLogArgs(%+v)", *p)
+}
+
+// Attributes:
+// - Success
+type ScribeLogResult struct {
+  Success *ResultCode `thrift:"success,0" db:"success" json:"success,omitempty"`
+}
+
+func NewScribeLogResult() *ScribeLogResult {
+  return &ScribeLogResult{}
+}
+
+var ScribeLogResult_Success_DEFAULT ResultCode
+func (p *ScribeLogResult) GetSuccess() ResultCode {
+  if !p.IsSetSuccess() {
+    return ScribeLogResult_Success_DEFAULT
+  }
+return *p.Success
+}
+func (p *ScribeLogResult) IsSetSuccess() bool {
+  return p.Success != nil
+}
+
+func (p *ScribeLogResult) Read(iprot thrift.TProtocol) error {
+  if _, err := iprot.ReadStructBegin(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
+  }
+
+
+  for {
+    _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
+    if err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
+    }
+    if fieldTypeId == thrift.STOP { break; }
+    switch fieldId {
+    case 0:
+      if fieldTypeId == thrift.I32 {
+        if err := p.ReadField0(iprot); err != nil {
+          return err
+        }
+      } else {
+        if err := iprot.Skip(fieldTypeId); err != nil {
+          return err
+        }
+      }
+    default:
+      if err := iprot.Skip(fieldTypeId); err != nil {
+        return err
+      }
+    }
+    if err := iprot.ReadFieldEnd(); err != nil {
+      return err
+    }
+  }
+  if err := iprot.ReadStructEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
+  }
+  return nil
+}
+
+func (p *ScribeLogResult) ReadField0(iprot thrift.TProtocol) error {
+  if v, err := iprot.ReadI32(); err != nil {
+    return thrift.PrependError("error reading field 0: ", err)
+} else {
+  temp := ResultCode(v)
+  p.Success = &temp
+}
+  return nil
+}
+
+func (p *ScribeLogResult) Write(oprot thrift.TProtocol) error {
+  if err := oprot.WriteStructBegin("Log_result"); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) }
+  if p != nil {
+    if err := p.writeField0(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *ScribeLogResult) writeField0(oprot thrift.TProtocol) (err error) {
+  if p.IsSetSuccess() {
+    if err := oprot.WriteFieldBegin("success", thrift.I32, 0); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) }
+    if err := oprot.WriteI32(int32(*p.Success)); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) }
+  }
+  return err
+}
+
+func (p *ScribeLogResult) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("ScribeLogResult(%+v)", *p)
+}
+
+
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go
new file mode 100644
index 0000000..2d5ebe7
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/GoUnusedProtection__.go
@@ -0,0 +1,7 @@
+// Autogenerated by Thrift Compiler (1.0.0-dev)
+// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+
+package zipkincore
+
+var GoUnusedProtection__ int;
+
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go
new file mode 100644
index 0000000..6a662ea
--- /dev/null
+++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore-consts.go
@@ -0,0 +1,45 @@
+//
Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package zipkincore + +import ( + "bytes" + "reflect" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +const CLIENT_SEND = "cs" +const CLIENT_RECV = "cr" +const SERVER_SEND = "ss" +const SERVER_RECV = "sr" +const WIRE_SEND = "ws" +const WIRE_RECV = "wr" +const CLIENT_SEND_FRAGMENT = "csf" +const CLIENT_RECV_FRAGMENT = "crf" +const SERVER_SEND_FRAGMENT = "ssf" +const SERVER_RECV_FRAGMENT = "srf" +const HTTP_HOST = "http.host" +const HTTP_METHOD = "http.method" +const HTTP_PATH = "http.path" +const HTTP_URL = "http.url" +const HTTP_STATUS_CODE = "http.status_code" +const HTTP_REQUEST_SIZE = "http.request.size" +const HTTP_RESPONSE_SIZE = "http.response.size" +const LOCAL_COMPONENT = "lc" +const ERROR = "error" +const CLIENT_ADDR = "ca" +const SERVER_ADDR = "sa" + +func init() { +} + diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go new file mode 100644 index 0000000..42f048a --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore/zipkinCore.go @@ -0,0 +1,1285 @@ +// Autogenerated by Thrift Compiler (1.0.0-dev) +// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + +package zipkincore + +import ( + "bytes" + "reflect" + "database/sql/driver" + "errors" + "context" + "fmt" + "github.com/apache/thrift/lib/go/thrift" +) + +// (needed to ensure safety because of naive import list construction.) +var _ = thrift.ZERO +var _ = fmt.Printf +var _ = context.Background +var _ = reflect.DeepEqual +var _ = bytes.Equal + +//A subset of thrift base types, except BYTES. 
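+// For example, a string-valued BinaryAnnotation (defined later in this file)
+// carries annotation_type = AnnotationType_STRING, i.e. wire value 6.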
+type AnnotationType int64
+const (
+  AnnotationType_BOOL AnnotationType = 0
+  AnnotationType_BYTES AnnotationType = 1
+  AnnotationType_I16 AnnotationType = 2
+  AnnotationType_I32 AnnotationType = 3
+  AnnotationType_I64 AnnotationType = 4
+  AnnotationType_DOUBLE AnnotationType = 5
+  AnnotationType_STRING AnnotationType = 6
+)
+
+func (p AnnotationType) String() string {
+  switch p {
+  case AnnotationType_BOOL: return "BOOL"
+  case AnnotationType_BYTES: return "BYTES"
+  case AnnotationType_I16: return "I16"
+  case AnnotationType_I32: return "I32"
+  case AnnotationType_I64: return "I64"
+  case AnnotationType_DOUBLE: return "DOUBLE"
+  case AnnotationType_STRING: return "STRING"
+  }
+  return "<UNSET>"
+}
+
+func AnnotationTypeFromString(s string) (AnnotationType, error) {
+  switch s {
+  case "BOOL": return AnnotationType_BOOL, nil
+  case "BYTES": return AnnotationType_BYTES, nil
+  case "I16": return AnnotationType_I16, nil
+  case "I32": return AnnotationType_I32, nil
+  case "I64": return AnnotationType_I64, nil
+  case "DOUBLE": return AnnotationType_DOUBLE, nil
+  case "STRING": return AnnotationType_STRING, nil
+  }
+  return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
+}
+
+
+func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
+
+func (p AnnotationType) MarshalText() ([]byte, error) {
+return []byte(p.String()), nil
+}
+
+func (p *AnnotationType) UnmarshalText(text []byte) error {
+q, err := AnnotationTypeFromString(string(text))
+if (err != nil) {
+return err
+}
+*p = q
+return nil
+}
+
+func (p *AnnotationType) Scan(value interface{}) error {
+v, ok := value.(int64)
+if !ok {
+return errors.New("Scan value is not int64")
+}
+*p = AnnotationType(v)
+return nil
+}
+
+func (p * AnnotationType) Value() (driver.Value, error) {
+  if p == nil {
+    return nil, nil
+  }
+return int64(*p), nil
+}
+// Indicates the network context of a service recording an annotation with two
+// exceptions.
+//
+// When a BinaryAnnotation, and key is CLIENT_ADDR or SERVER_ADDR,
+// the endpoint indicates the source or destination of an RPC. This exception
+// allows zipkin to display network context of uninstrumented services, or
+// clients such as web browsers.
+//
+// Attributes:
+// - Ipv4: IPv4 host address packed into 4 bytes.
+//
+// Ex for the ip 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
+// - Port: IPv4 port or 0, if unknown.
+//
+// Note: this is to be treated as an unsigned integer, so watch for negatives.
+// - ServiceName: Classifier of a source or destination in lowercase, such as "zipkin-web".
+//
+// This is the primary parameter for trace lookup, so it should be as intuitive
+// as possible, for example, matching names in service discovery.
+//
+// Conventionally, when the service name isn't known, service_name = "unknown".
+// However, it is also permissible to set service_name = "" (empty string).
+// The difference in the latter usage is that the span will not be queryable
+// by service name unless more information is added to the span with non-empty
+// service name, e.g. an additional annotation from the server.
+//
+// In particular, clients may not have a reliable service name at ingest. One
+// approach is to set service_name to "" at ingest, and later assign a
+// better label based on binary annotations, such as user agent.
+// - Ipv6: IPv6 host address packed into 16 bytes.
Ex Inet6Address.getBytes() +type Endpoint struct { + Ipv4 int32 `thrift:"ipv4,1" db:"ipv4" json:"ipv4"` + Port int16 `thrift:"port,2" db:"port" json:"port"` + ServiceName string `thrift:"service_name,3" db:"service_name" json:"service_name"` + Ipv6 []byte `thrift:"ipv6,4" db:"ipv6" json:"ipv6,omitempty"` +} + +func NewEndpoint() *Endpoint { + return &Endpoint{} +} + + +func (p *Endpoint) GetIpv4() int32 { + return p.Ipv4 +} + +func (p *Endpoint) GetPort() int16 { + return p.Port +} + +func (p *Endpoint) GetServiceName() string { + return p.ServiceName +} +var Endpoint_Ipv6_DEFAULT []byte + +func (p *Endpoint) GetIpv6() []byte { + return p.Ipv6 +} +func (p *Endpoint) IsSetIpv6() bool { + return p.Ipv6 != nil +} + +func (p *Endpoint) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I32 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.I16 { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRING { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Endpoint) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Ipv4 = v +} + return nil +} + +func (p *Endpoint) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI16(); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Port = v +} + return nil +} + +func (p *Endpoint) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.ServiceName = v +} + return nil +} + +func (p *Endpoint) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.Ipv6 = v +} + return nil +} + +func (p *Endpoint) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Endpoint"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField2(oprot); err != nil { return err } + if err := p.writeField3(oprot); err != nil { return err } + if err := p.writeField4(oprot); err != nil { return err } + } + if err := 
oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err) }
+  if err := oprot.WriteI32(int32(p.Ipv4)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err) }
+  if err := oprot.WriteI16(int16(p.Port)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err) }
+  if err := oprot.WriteString(string(p.ServiceName)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err) }
+  return err
+}
+
+func (p *Endpoint) writeField4(oprot thrift.TProtocol) (err error) {
+  if p.IsSetIpv6() {
+    if err := oprot.WriteFieldBegin("ipv6", thrift.STRING, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err) }
+    if err := oprot.WriteBinary(p.Ipv6); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err) }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err) }
+  }
+  return err
+}
+
+func (p *Endpoint) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Endpoint(%+v)", *p)
+}
+
+// Associates an event that explains latency with a timestamp.
+//
+// Unlike log statements, annotations are often codes: for example "sr".
+//
+// Attributes:
+// - Timestamp: Microseconds from epoch.
+//
+// This value should use the most precise value possible. For example,
+// gettimeofday or multiplying currentTimeMillis by 1000.
+// - Value: Usually a short tag indicating an event, like "sr" or "finagle.retry".
+// - Host: The host that recorded the value, primarily for query by service name.
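+//
+// For example (illustrative): a server would record receipt of a request as
+// Annotation{Timestamp: <epoch micros>, Value: SERVER_RECV ("sr"),
+// Host: <the server's Endpoint>}.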
+type Annotation struct { + Timestamp int64 `thrift:"timestamp,1" db:"timestamp" json:"timestamp"` + Value string `thrift:"value,2" db:"value" json:"value"` + Host *Endpoint `thrift:"host,3" db:"host" json:"host,omitempty"` +} + +func NewAnnotation() *Annotation { + return &Annotation{} +} + + +func (p *Annotation) GetTimestamp() int64 { + return p.Timestamp +} + +func (p *Annotation) GetValue() string { + return p.Value +} +var Annotation_Host_DEFAULT *Endpoint +func (p *Annotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return Annotation_Host_DEFAULT + } +return p.Host +} +func (p *Annotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *Annotation) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Annotation) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Timestamp = v +} + return nil +} + +func (p *Annotation) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Value = v +} + return nil +} + +func (p *Annotation) ReadField3(iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *Annotation) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Annotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField2(oprot); err != nil { return err } + if err := p.writeField3(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) } + if err := 
oprot.WriteI64(int64(p.Timestamp)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) }
+  return err
+}
+
+func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+  if err := oprot.WriteString(string(p.Value)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+  return err
+}
+
+func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) {
+  if p.IsSetHost() {
+    if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err) }
+    if err := p.Host.Write(oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+    }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err) }
+  }
+  return err
+}
+
+func (p *Annotation) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("Annotation(%+v)", *p)
+}
+
+// Binary annotations are tags applied to a Span to give it context. For
+// example, a binary annotation of HTTP_PATH ("http.path") could be the path
+// to a resource in an RPC call.
+//
+// Binary annotations of type STRING are always queryable, though more a
+// historical implementation detail than a structural concern.
+//
+// Binary annotations can repeat, and vary on the host. Similar to Annotation,
+// the host indicates who logged the event. This allows you to tell the
+// difference between the client and server side of the same key. For example,
+// the key "http.path" might be different on the client and server side due to
+// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
+// you can see the different points of view, which often help in debugging.
+//
+// Attributes:
+// - Key: Name used to look up spans, such as "http.path" or "finagle.version".
+// - Value: Serialized thrift bytes, in TBinaryProtocol format.
+//
+// For legacy reasons, byte order is big-endian. See THRIFT-3217.
+// - AnnotationType: The thrift type of value, most often STRING.
+//
+// annotation_type shouldn't vary for the same key.
+// - Host: The host that recorded the value, allowing query by service name or address.
+//
+// There are two exceptions: when key is "ca" or "sa", this is the source or
+// destination of an RPC. This exception allows zipkin to display network
+// context of uninstrumented services, such as browsers or databases.
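+//
+// For example (illustrative): tagging the request path could use Key =
+// HTTP_PATH ("http.path"), Value = []byte("/api/v1/myresource"), and
+// AnnotationType = AnnotationType_STRING, with Host set to whichever side
+// recorded the tag.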
+type BinaryAnnotation struct { + Key string `thrift:"key,1" db:"key" json:"key"` + Value []byte `thrift:"value,2" db:"value" json:"value"` + AnnotationType AnnotationType `thrift:"annotation_type,3" db:"annotation_type" json:"annotation_type"` + Host *Endpoint `thrift:"host,4" db:"host" json:"host,omitempty"` +} + +func NewBinaryAnnotation() *BinaryAnnotation { + return &BinaryAnnotation{} +} + + +func (p *BinaryAnnotation) GetKey() string { + return p.Key +} + +func (p *BinaryAnnotation) GetValue() []byte { + return p.Value +} + +func (p *BinaryAnnotation) GetAnnotationType() AnnotationType { + return p.AnnotationType +} +var BinaryAnnotation_Host_DEFAULT *Endpoint +func (p *BinaryAnnotation) GetHost() *Endpoint { + if !p.IsSetHost() { + return BinaryAnnotation_Host_DEFAULT + } +return p.Host +} +func (p *BinaryAnnotation) IsSetHost() bool { + return p.Host != nil +} + +func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.STRING { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 2: + if fieldTypeId == thrift.STRING { + if err := p.ReadField2(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.I32 { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.STRUCT { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *BinaryAnnotation) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.Key = v +} + return nil +} + +func (p *BinaryAnnotation) ReadField2(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBinary(); err != nil { + return thrift.PrependError("error reading field 2: ", err) +} else { + p.Value = v +} + return nil +} + +func (p *BinaryAnnotation) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI32(); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + temp := AnnotationType(v) + p.AnnotationType = temp +} + return nil +} + +func (p *BinaryAnnotation) ReadField4(iprot thrift.TProtocol) error { + p.Host = &Endpoint{} + if err := p.Host.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err) + } + return nil +} + +func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + 
if p != nil {
+    if err := p.writeField1(oprot); err != nil { return err }
+    if err := p.writeField2(oprot); err != nil { return err }
+    if err := p.writeField3(oprot); err != nil { return err }
+    if err := p.writeField4(oprot); err != nil { return err }
+  }
+  if err := oprot.WriteFieldStop(); err != nil {
+    return thrift.PrependError("write field stop error: ", err) }
+  if err := oprot.WriteStructEnd(); err != nil {
+    return thrift.PrependError("write struct stop error: ", err) }
+  return nil
+}
+
+func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) }
+  if err := oprot.WriteString(string(p.Key)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err) }
+  if err := oprot.WriteBinary(p.Value); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) {
+  if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err) }
+  if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err) }
+  if err := oprot.WriteFieldEnd(); err != nil {
+    return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err) }
+  return err
+}
+
+func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) {
+  if p.IsSetHost() {
+    if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err) }
+    if err := p.Host.Write(oprot); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
+    }
+    if err := oprot.WriteFieldEnd(); err != nil {
+      return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err) }
+  }
+  return err
+}
+
+func (p *BinaryAnnotation) String() string {
+  if p == nil {
+    return "<nil>"
+  }
+  return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
+}
+
+// A trace is a series of spans (often RPC calls) which form a latency tree.
+//
+// Spans are usually created by instrumentation in RPC clients or servers, but
+// can also represent in-process activity. Annotations in spans are similar to
+// log statements, and are sometimes created directly by application developers
+// to indicate events of interest, such as a cache miss.
+//
+// The root span is where parent_id = Nil; it usually has the longest duration
+// in the trace.
+//
+// Span identifiers are packed into i64s, but should be treated opaquely.
+// String encoding is fixed-width lower-hex, to avoid signed interpretation.
+//
+// Attributes:
+// - TraceID: Unique 8-byte identifier for a trace, set on all spans within it.
+// - Name: Span name in lowercase, rpc method for example. Conventionally, when the
+// span name isn't known, name = "unknown".
+// - ID: Unique 8-byte identifier of this span within a trace. A span is uniquely
+// identified in storage by (trace_id, id).
+// - ParentID: The parent's Span.id; absent if this is the root span in a trace.
+// - Annotations: Associates events that explain latency with a timestamp. Unlike log
+// statements, annotations are often codes: for example SERVER_RECV("sr").
+// Annotations are sorted ascending by timestamp.
+// - BinaryAnnotations: Tags a span with context, usually to support query or aggregation. For
+// example, a binary annotation key could be "http.path".
+// - Debug: True is a request to store this span even if it overrides sampling policy.
+// - Timestamp: Epoch microseconds of the start of this span, absent if this is an incomplete
+// span.
+//
+// This value should be set directly by instrumentation, using the most
+// precise value possible. For example, gettimeofday or syncing nanoTime
+// against a tick of currentTimeMillis.
+//
+// For compatibility with instrumentation that precedes this field, collectors
+// or span stores can derive this via Annotation.timestamp.
+// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
+//
+// Timestamp is nullable for input only. Spans without a timestamp cannot be
+// presented in a timeline: Span stores should not output spans missing a
+// timestamp.
+//
+// There are two known edge-cases where this could be absent: both cases
+// exist when a collector receives a span in parts and a binary annotation
+// precedes a timestamp. This is possible when:
+// - The span is in-flight (ex not yet received a timestamp)
+// - The span's start event was lost
+// - Duration: Measurement in microseconds of the critical path, if known. Durations of
+// less than one microsecond must be rounded up to 1 microsecond.
+//
+// This value should be set directly, as opposed to implicitly via annotation
+// timestamps. Doing so encourages precision decoupled from problems of
+// clocks, such as skew or NTP updates causing time to move backwards.
+//
+// For compatibility with instrumentation that precedes this field, collectors
+// or span stores can derive this by subtracting Annotation.timestamp.
+// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
+//
+// If this field is persisted as unset, zipkin will continue to work, except
+// duration query support will be implementation-specific. Similarly, setting
+// this field non-atomically is implementation-specific.
+//
+// This field is i64 vs i32 to support spans longer than 35 minutes.
+// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non-zero, this
+// means the trace uses 128 bit traceIds instead of 64 bit.
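+//
+// For example (illustrative): a complete 100ms server-side span could carry
+// Timestamp = 1472470996199000 (epoch microseconds) and Duration = 100000,
+// with a SERVER_RECV ("sr") annotation at the start and SERVER_SEND ("ss")
+// at the end.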
+type Span struct { + TraceID int64 `thrift:"trace_id,1" db:"trace_id" json:"trace_id"` + // unused field # 2 + Name string `thrift:"name,3" db:"name" json:"name"` + ID int64 `thrift:"id,4" db:"id" json:"id"` + ParentID *int64 `thrift:"parent_id,5" db:"parent_id" json:"parent_id,omitempty"` + Annotations []*Annotation `thrift:"annotations,6" db:"annotations" json:"annotations"` + // unused field # 7 + BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" db:"binary_annotations" json:"binary_annotations"` + Debug bool `thrift:"debug,9" db:"debug" json:"debug,omitempty"` + Timestamp *int64 `thrift:"timestamp,10" db:"timestamp" json:"timestamp,omitempty"` + Duration *int64 `thrift:"duration,11" db:"duration" json:"duration,omitempty"` + TraceIDHigh *int64 `thrift:"trace_id_high,12" db:"trace_id_high" json:"trace_id_high,omitempty"` +} + +func NewSpan() *Span { + return &Span{} +} + + +func (p *Span) GetTraceID() int64 { + return p.TraceID +} + +func (p *Span) GetName() string { + return p.Name +} + +func (p *Span) GetID() int64 { + return p.ID +} +var Span_ParentID_DEFAULT int64 +func (p *Span) GetParentID() int64 { + if !p.IsSetParentID() { + return Span_ParentID_DEFAULT + } +return *p.ParentID +} + +func (p *Span) GetAnnotations() []*Annotation { + return p.Annotations +} + +func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation { + return p.BinaryAnnotations +} +var Span_Debug_DEFAULT bool = false + +func (p *Span) GetDebug() bool { + return p.Debug +} +var Span_Timestamp_DEFAULT int64 +func (p *Span) GetTimestamp() int64 { + if !p.IsSetTimestamp() { + return Span_Timestamp_DEFAULT + } +return *p.Timestamp +} +var Span_Duration_DEFAULT int64 +func (p *Span) GetDuration() int64 { + if !p.IsSetDuration() { + return Span_Duration_DEFAULT + } +return *p.Duration +} +var Span_TraceIDHigh_DEFAULT int64 +func (p *Span) GetTraceIDHigh() int64 { + if !p.IsSetTraceIDHigh() { + return Span_TraceIDHigh_DEFAULT + } +return *p.TraceIDHigh +} +func (p *Span) IsSetParentID() bool { + return p.ParentID != nil +} + +func (p *Span) IsSetDebug() bool { + return p.Debug != Span_Debug_DEFAULT +} + +func (p *Span) IsSetTimestamp() bool { + return p.Timestamp != nil +} + +func (p *Span) IsSetDuration() bool { + return p.Duration != nil +} + +func (p *Span) IsSetTraceIDHigh() bool { + return p.TraceIDHigh != nil +} + +func (p *Span) Read(iprot thrift.TProtocol) error { + if _, err := iprot.ReadStructBegin(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) + } + + + for { + _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() + if err != nil { + return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) + } + if fieldTypeId == thrift.STOP { break; } + switch fieldId { + case 1: + if fieldTypeId == thrift.I64 { + if err := p.ReadField1(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 3: + if fieldTypeId == thrift.STRING { + if err := p.ReadField3(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 4: + if fieldTypeId == thrift.I64 { + if err := p.ReadField4(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 5: + if fieldTypeId == thrift.I64 { + if err := p.ReadField5(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 6: + if fieldTypeId 
== thrift.LIST { + if err := p.ReadField6(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 8: + if fieldTypeId == thrift.LIST { + if err := p.ReadField8(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 9: + if fieldTypeId == thrift.BOOL { + if err := p.ReadField9(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 10: + if fieldTypeId == thrift.I64 { + if err := p.ReadField10(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 11: + if fieldTypeId == thrift.I64 { + if err := p.ReadField11(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + case 12: + if fieldTypeId == thrift.I64 { + if err := p.ReadField12(iprot); err != nil { + return err + } + } else { + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + default: + if err := iprot.Skip(fieldTypeId); err != nil { + return err + } + } + if err := iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) + } + return nil +} + +func (p *Span) ReadField1(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 1: ", err) +} else { + p.TraceID = v +} + return nil +} + +func (p *Span) ReadField3(iprot thrift.TProtocol) error { + if v, err := iprot.ReadString(); err != nil { + return thrift.PrependError("error reading field 3: ", err) +} else { + p.Name = v +} + return nil +} + +func (p *Span) ReadField4(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 4: ", err) +} else { + p.ID = v +} + return nil +} + +func (p *Span) ReadField5(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 5: ", err) +} else { + p.ParentID = &v +} + return nil +} + +func (p *Span) ReadField6(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*Annotation, 0, size) + p.Annotations = tSlice + for i := 0; i < size; i ++ { + _elem0 := &Annotation{} + if err := _elem0.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) + } + p.Annotations = append(p.Annotations, _elem0) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) ReadField8(iprot thrift.TProtocol) error { + _, size, err := iprot.ReadListBegin() + if err != nil { + return thrift.PrependError("error reading list begin: ", err) + } + tSlice := make([]*BinaryAnnotation, 0, size) + p.BinaryAnnotations = tSlice + for i := 0; i < size; i ++ { + _elem1 := &BinaryAnnotation{} + if err := _elem1.Read(iprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) + } + p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1) + } + if err := iprot.ReadListEnd(); err != nil { + return thrift.PrependError("error reading list end: ", err) + } + return nil +} + +func (p *Span) 
ReadField9(iprot thrift.TProtocol) error { + if v, err := iprot.ReadBool(); err != nil { + return thrift.PrependError("error reading field 9: ", err) +} else { + p.Debug = v +} + return nil +} + +func (p *Span) ReadField10(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 10: ", err) +} else { + p.Timestamp = &v +} + return nil +} + +func (p *Span) ReadField11(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 11: ", err) +} else { + p.Duration = &v +} + return nil +} + +func (p *Span) ReadField12(iprot thrift.TProtocol) error { + if v, err := iprot.ReadI64(); err != nil { + return thrift.PrependError("error reading field 12: ", err) +} else { + p.TraceIDHigh = &v +} + return nil +} + +func (p *Span) Write(oprot thrift.TProtocol) error { + if err := oprot.WriteStructBegin("Span"); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) } + if p != nil { + if err := p.writeField1(oprot); err != nil { return err } + if err := p.writeField3(oprot); err != nil { return err } + if err := p.writeField4(oprot); err != nil { return err } + if err := p.writeField5(oprot); err != nil { return err } + if err := p.writeField6(oprot); err != nil { return err } + if err := p.writeField8(oprot); err != nil { return err } + if err := p.writeField9(oprot); err != nil { return err } + if err := p.writeField10(oprot); err != nil { return err } + if err := p.writeField11(oprot); err != nil { return err } + if err := p.writeField12(oprot); err != nil { return err } + } + if err := oprot.WriteFieldStop(); err != nil { + return thrift.PrependError("write field stop error: ", err) } + if err := oprot.WriteStructEnd(); err != nil { + return thrift.PrependError("write struct stop error: ", err) } + return nil +} + +func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err) } + if err := oprot.WriteI64(int64(p.TraceID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err) } + return err +} + +func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err) } + if err := oprot.WriteString(string(p.Name)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err) } + return err +} + +func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err) } + if err := oprot.WriteI64(int64(p.ID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err) } + return err +} + +func (p *Span) writeField5(oprot 
thrift.TProtocol) (err error) { + if p.IsSetParentID() { + if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err) } + if err := oprot.WriteI64(int64(*p.ParentID)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err) } + } + return err +} + +func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err) } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.Annotations { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err) } + return err +} + +func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { + if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err) } + if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil { + return thrift.PrependError("error writing list begin: ", err) + } + for _, v := range p.BinaryAnnotations { + if err := v.Write(oprot); err != nil { + return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) + } + } + if err := oprot.WriteListEnd(); err != nil { + return thrift.PrependError("error writing list end: ", err) + } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err) } + return err +} + +func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { + if p.IsSetDebug() { + if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err) } + if err := oprot.WriteBool(bool(p.Debug)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err) } + } + return err +} + +func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { + if p.IsSetTimestamp() { + if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err) } + if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err) } + } + return err +} + +func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { + if p.IsSetDuration() { + if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil 
{ + return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err) } + if err := oprot.WriteI64(int64(*p.Duration)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err) } + } + return err +} + +func (p *Span) writeField12(oprot thrift.TProtocol) (err error) { + if p.IsSetTraceIDHigh() { + if err := oprot.WriteFieldBegin("trace_id_high", thrift.I64, 12); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err) } + if err := oprot.WriteI64(int64(*p.TraceIDHigh)); err != nil { + return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err) } + if err := oprot.WriteFieldEnd(); err != nil { + return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err) } + } + return err +} + +func (p *Span) String() string { + if p == nil { + return "" + } + return fmt.Sprintf("Span(%+v)", *p) +} + diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go new file mode 100644 index 0000000..5754d5f --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go @@ -0,0 +1,440 @@ +package zipkintracer + +import ( + "errors" + "time" + + opentracing "github.com/opentracing/opentracing-go" + "github.com/opentracing/opentracing-go/ext" + + otobserver "github.com/opentracing-contrib/go-observer" + "github.com/openzipkin/zipkin-go-opentracing/flag" +) + +// ErrInvalidEndpoint will be thrown if hostPort parameter is corrupted or host +// can't be resolved +var ErrInvalidEndpoint = errors.New("Invalid Endpoint. Please check hostPort parameter") + +// Tracer extends the opentracing.Tracer interface with methods to +// probe implementation state, for use by zipkintracer consumers. +type Tracer interface { + opentracing.Tracer + + // Options gets the Options used in New() or NewWithOptions(). + Options() TracerOptions +} + +// TracerOptions allows creating a customized Tracer. +type TracerOptions struct { + // shouldSample is a function which is called when creating a new Span and + // determines whether that Span is sampled. The randomized TraceID is supplied + // to allow deterministic sampling decisions to be made across different nodes. + shouldSample func(traceID uint64) bool + // trimUnsampledSpans turns potentially expensive operations on unsampled + // Spans into no-ops. More precisely, tags and log events are silently + // discarded. If NewSpanEventListener is set, the callbacks will still fire. + trimUnsampledSpans bool + // recorder receives Spans which have been finished. + recorder SpanRecorder + // newSpanEventListener can be used to enhance the tracer by effectively + // attaching external code to trace events. See NetTraceIntegrator for a + // practical example, and event.go for the list of possible events. + newSpanEventListener func() func(SpanEvent) + // dropAllLogs turns log events on all Spans into no-ops. + // If NewSpanEventListener is set, the callbacks will still fire. + dropAllLogs bool + // MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero + // value). If a span has more logs than this value, logs are dropped as + // necessary (and replaced with a log describing how many were dropped). 
+ // + // About half of the MaxLogsPerSpan logs kept are the oldest logs, and about + // half are the newest logs. + // + // If NewSpanEventListener is set, the callbacks will still fire for all log + // events. This value is ignored if DropAllLogs is true. + maxLogsPerSpan int + // debugAssertSingleGoroutine internally records the ID of the goroutine + // creating each Span and verifies that no operation is carried out on + // it on a different goroutine. + // Provided strictly for development purposes. + // Passing Spans between goroutines without proper synchronization often + // results in use-after-Finish() errors. For a simple example, consider the + // following pseudocode: + // + // func (s *Server) Handle(req http.Request) error { + // sp := s.StartSpan("server") + // defer sp.Finish() + // wait := s.queueProcessing(opentracing.ContextWithSpan(context.Background(), sp), req) + // select { + // case resp := <-wait: + // return resp.Error + // case <-time.After(10*time.Second): + // sp.LogEvent("timed out waiting for processing") + // return ErrTimedOut + // } + // } + // + // This looks reasonable at first, but a request which spends more than ten + // seconds in the queue is abandoned by the main goroutine and its trace + // finished, leading to use-after-finish when the request is finally + // processed. Note also that even joining on to a finished Span via + // StartSpanWithOptions constitutes an illegal operation. + // + // Code bases which do not require (or decide they do not want) Spans to + // be passed across goroutine boundaries can run with this flag enabled in + // tests to increase their chances of spotting wrong-doers. + debugAssertSingleGoroutine bool + // debugAssertUseAfterFinish is provided strictly for development purposes. + // When set, it attempts to exacerbate issues emanating from use of Spans + // after calling Finish by running additional assertions. + debugAssertUseAfterFinish bool + // enableSpanPool enables the use of a pool, so that the tracer reuses spans + // after Finish has been called on them. Adds a slight performance gain as it + // reduces allocations. However, if you have any use-after-finish race + // conditions the code may panic. + enableSpanPool bool + // logger ... + logger Logger + // clientServerSameSpan allows for Zipkin V1 style span per RPC. This places + // both the client end and the server end of an RPC call into the same span. + clientServerSameSpan bool + // debugMode activates Zipkin's debug request allowing for all Spans originating + // from this tracer to pass through and bypass sampling. Use with extreme care + // as it might flood your system if you have many traces starting from the + // service you are instrumenting. + debugMode bool + // traceID128Bit enables the generation of 128 bit traceIDs in case the tracer + // needs to create a root span. By default regular 64 bit traceIDs are used. + // Regardless of this setting, the library will propagate and support both + // 64 and 128 bit incoming traces from upstream sources. + traceID128Bit bool + + observer otobserver.Observer +} + +// TracerOption allows for functional options.
+// See: http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type TracerOption func(opts *TracerOptions) error + +// WithSampler allows one to add a Sampler function +func WithSampler(sampler Sampler) TracerOption { + return func(opts *TracerOptions) error { + opts.shouldSample = sampler + return nil + } +} + +// TrimUnsampledSpans option +func TrimUnsampledSpans(trim bool) TracerOption { + return func(opts *TracerOptions) error { + opts.trimUnsampledSpans = trim + return nil + } +} + +// DropAllLogs option +func DropAllLogs(dropAllLogs bool) TracerOption { + return func(opts *TracerOptions) error { + opts.dropAllLogs = dropAllLogs + return nil + } +} + +// WithLogger option +func WithLogger(logger Logger) TracerOption { + return func(opts *TracerOptions) error { + opts.logger = logger + return nil + } +} + +// DebugAssertSingleGoroutine option +func DebugAssertSingleGoroutine(val bool) TracerOption { + return func(opts *TracerOptions) error { + opts.debugAssertSingleGoroutine = val + return nil + } +} + +// DebugAssertUseAfterFinish option +func DebugAssertUseAfterFinish(val bool) TracerOption { + return func(opts *TracerOptions) error { + opts.debugAssertUseAfterFinish = val + return nil + } +} + +// TraceID128Bit option +func TraceID128Bit(val bool) TracerOption { + return func(opts *TracerOptions) error { + opts.traceID128Bit = val + return nil + } +} + +// ClientServerSameSpan allows placing client-side and server-side annotations +// for an RPC call in the same span (Zipkin V1 behavior) or different spans +// (more in line with other tracing solutions). By default this Tracer +// uses shared host spans (so client-side and server-side in the same span). +// If using separate spans you might run into trouble with Zipkin V1 as clock +// skew issues can't be remedied at Zipkin server side. +func ClientServerSameSpan(val bool) TracerOption { + return func(opts *TracerOptions) error { + opts.clientServerSameSpan = val + return nil + } +} + +// DebugMode allows setting the tracer to Zipkin debug mode +func DebugMode(val bool) TracerOption { + return func(opts *TracerOptions) error { + opts.debugMode = val + return nil + } +} + +// EnableSpanPool ... +func EnableSpanPool(val bool) TracerOption { + return func(opts *TracerOptions) error { + opts.enableSpanPool = val + return nil + } +} + +// NewSpanEventListener option +func NewSpanEventListener(f func() func(SpanEvent)) TracerOption { + return func(opts *TracerOptions) error { + opts.newSpanEventListener = f + return nil + } +} + +// WithMaxLogsPerSpan option +func WithMaxLogsPerSpan(limit int) TracerOption { + return func(opts *TracerOptions) error { + if limit < 5 || limit > 10000 { + return errors.New("invalid MaxLogsPerSpan limit. Should be between 5 and 10000") + } + opts.maxLogsPerSpan = limit + return nil + } +} + +// NewTracer creates a new OpenTracing compatible Zipkin Tracer.
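// A minimal usage sketch of the options above (illustrative only; `collector`
// stands for any Collector implementation and is not defined in this file):
//
//   recorder := NewRecorder(collector, false, "127.0.0.1:0", "my-service")
//   tracer, err := NewTracer(
//       recorder,
//       ClientServerSameSpan(true),
//       TraceID128Bit(true),
//       WithMaxLogsPerSpan(100),
//   )
//   if err != nil {
//       // an option rejected its argument, e.g. an out-of-range log limit
//   }
//   opentracing.SetGlobalTracer(tracer)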
+func NewTracer(recorder SpanRecorder, options ...TracerOption) (opentracing.Tracer, error) { + opts := &TracerOptions{ + recorder: recorder, + shouldSample: alwaysSample, + trimUnsampledSpans: false, + newSpanEventListener: func() func(SpanEvent) { return nil }, + logger: &nopLogger{}, + debugAssertSingleGoroutine: false, + debugAssertUseAfterFinish: false, + clientServerSameSpan: true, + debugMode: false, + traceID128Bit: false, + maxLogsPerSpan: 10000, + observer: nil, + } + for _, o := range options { + err := o(opts) + if err != nil { + return nil, err + } + } + rval := &tracerImpl{options: *opts} + rval.textPropagator = &textMapPropagator{rval} + rval.binaryPropagator = &binaryPropagator{rval} + rval.accessorPropagator = &accessorPropagator{rval} + return rval, nil +} + +// Implements the `Tracer` interface. +type tracerImpl struct { + options TracerOptions + textPropagator *textMapPropagator + binaryPropagator *binaryPropagator + accessorPropagator *accessorPropagator +} + +func (t *tracerImpl) StartSpan( + operationName string, + opts ...opentracing.StartSpanOption, +) opentracing.Span { + sso := opentracing.StartSpanOptions{} + for _, o := range opts { + o.Apply(&sso) + } + return t.startSpanWithOptions(operationName, sso) +} + +func (t *tracerImpl) getSpan() *spanImpl { + if t.options.enableSpanPool { + sp := spanPool.Get().(*spanImpl) + sp.reset() + return sp + } + return &spanImpl{} +} + +func (t *tracerImpl) startSpanWithOptions( + operationName string, + opts opentracing.StartSpanOptions, +) opentracing.Span { + // Start time. + startTime := opts.StartTime + if startTime.IsZero() { + startTime = time.Now() + } + + // Tags. + tags := opts.Tags + + // Build the new span. This is the only allocation: We'll return this as + // an opentracing.Span. + sp := t.getSpan() + + if t.options.observer != nil { + sp.observer, _ = t.options.observer.OnStartSpan(sp, operationName, opts) + } + + // Look for a parent in the list of References. + // + // TODO: would be nice if basictracer did something with all + // References, not just the first one. 
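// In outline, the loop below resolves the parent as follows: a ChildOf
// reference adopts the referenced context's TraceID, Sampled state, Flags and
// baggage; if clientServerSameSpan is enabled and the span is tagged as an RPC
// server span it also joins the client's SpanID (Zipkin V1 shared spans),
// otherwise a fresh SpanID is allocated with the referenced span as parent.
// A FollowsFrom reference always allocates a fresh SpanID under the
// referenced span.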
+ReferencesLoop: + for _, ref := range opts.References { + switch ref.Type { + case opentracing.ChildOfRef: + refCtx := ref.ReferencedContext.(SpanContext) + sp.raw.Context.TraceID = refCtx.TraceID + sp.raw.Context.ParentSpanID = &refCtx.SpanID + sp.raw.Context.Sampled = refCtx.Sampled + sp.raw.Context.Flags = refCtx.Flags + sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed + + if t.options.clientServerSameSpan && + tags[string(ext.SpanKind)] == ext.SpanKindRPCServer.Value { + sp.raw.Context.SpanID = refCtx.SpanID + sp.raw.Context.ParentSpanID = refCtx.ParentSpanID + sp.raw.Context.Owner = false + } else { + sp.raw.Context.SpanID = randomID() + sp.raw.Context.ParentSpanID = &refCtx.SpanID + sp.raw.Context.Owner = true + } + + if l := len(refCtx.Baggage); l > 0 { + sp.raw.Context.Baggage = make(map[string]string, l) + for k, v := range refCtx.Baggage { + sp.raw.Context.Baggage[k] = v + } + } + break ReferencesLoop + case opentracing.FollowsFromRef: + refCtx := ref.ReferencedContext.(SpanContext) + sp.raw.Context.TraceID = refCtx.TraceID + sp.raw.Context.ParentSpanID = &refCtx.SpanID + sp.raw.Context.Sampled = refCtx.Sampled + sp.raw.Context.Flags = refCtx.Flags + sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed + + sp.raw.Context.SpanID = randomID() + sp.raw.Context.ParentSpanID = &refCtx.SpanID + sp.raw.Context.Owner = true + + if l := len(refCtx.Baggage); l > 0 { + sp.raw.Context.Baggage = make(map[string]string, l) + for k, v := range refCtx.Baggage { + sp.raw.Context.Baggage[k] = v + } + } + break ReferencesLoop + } + } + if sp.raw.Context.TraceID.Empty() { + // No parent Span found; allocate new trace and span ids and determine + // the Sampled status. + if t.options.traceID128Bit { + sp.raw.Context.TraceID.High = randomID() + } + sp.raw.Context.TraceID.Low, sp.raw.Context.SpanID = randomID2() + sp.raw.Context.Sampled = t.options.shouldSample(sp.raw.Context.TraceID.Low) + sp.raw.Context.Flags = flag.IsRoot + sp.raw.Context.Owner = true + } + if t.options.debugMode { + sp.raw.Context.Flags |= flag.Debug + } + return t.startSpanInternal( + sp, + operationName, + startTime, + tags, + ) +} + +func (t *tracerImpl) startSpanInternal( + sp *spanImpl, + operationName string, + startTime time.Time, + tags opentracing.Tags, +) opentracing.Span { + sp.tracer = t + if t.options.newSpanEventListener != nil { + sp.event = t.options.newSpanEventListener() + } + sp.raw.Operation = operationName + sp.raw.Start = startTime + sp.raw.Duration = -1 + sp.raw.Tags = tags + + if t.options.debugAssertSingleGoroutine { + sp.SetTag(debugGoroutineIDTag, curGoroutineID()) + } + defer sp.onCreate(operationName) + return sp +} + +type delegatorType struct{} + +// Delegator is the format to use for DelegatingCarrier. 
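// A sketch of propagation through the delegator format (assuming a
// wire.ProtobufCarrier value, which implements DelegatingCarrier):
//
//   carrier := &wire.ProtobufCarrier{}
//   if err := tracer.Inject(span.Context(), Delegator, carrier); err != nil {
//       // handle propagation error
//   }
//   spanCtx, err := tracer.Extract(Delegator, carrier)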
+var Delegator delegatorType + +func (t *tracerImpl) Inject(sc opentracing.SpanContext, format interface{}, carrier interface{}) error { + switch format { + case opentracing.TextMap, opentracing.HTTPHeaders: + return t.textPropagator.Inject(sc, carrier) + case opentracing.Binary: + return t.binaryPropagator.Inject(sc, carrier) + } + if _, ok := format.(delegatorType); ok { + return t.accessorPropagator.Inject(sc, carrier) + } + return opentracing.ErrUnsupportedFormat +} + +func (t *tracerImpl) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) { + switch format { + case opentracing.TextMap, opentracing.HTTPHeaders: + return t.textPropagator.Extract(carrier) + case opentracing.Binary: + return t.binaryPropagator.Extract(carrier) + } + if _, ok := format.(delegatorType); ok { + return t.accessorPropagator.Extract(carrier) + } + return nil, opentracing.ErrUnsupportedFormat +} + +func (t *tracerImpl) Options() TracerOptions { + return t.options +} + +// WithObserver assigns an initialized observer to opts.observer +func WithObserver(observer otobserver.Observer) TracerOption { + return func(opts *TracerOptions) error { + opts.observer = observer + return nil + } +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go new file mode 100644 index 0000000..18a1b59 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go @@ -0,0 +1,40 @@ +package types + +import ( + "fmt" + "strconv" +) + +// TraceID is a 128 bit number internally stored as 2x uint64 (high & low). +type TraceID struct { + High uint64 + Low uint64 +} + +// TraceIDFromHex returns the TraceID from a Hex string. +func TraceIDFromHex(h string) (t TraceID, err error) { + if len(h) > 16 { + if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil { + return + } + t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64) + return + } + t.Low, err = strconv.ParseUint(h, 16, 64) + return +} + +// ToHex outputs the 128-bit traceID as hex string. +func (t TraceID) ToHex() string { + if t.High == 0 { + return strconv.FormatUint(t.Low, 16) + } + return fmt.Sprintf( + "%016s%016s", strconv.FormatUint(t.High, 16), strconv.FormatUint(t.Low, 16), + ) +} + +// Empty returns if TraceID has zero value +func (t TraceID) Empty() bool { + return t.Low == 0 && t.High == 0 +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go new file mode 100644 index 0000000..2706615 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go @@ -0,0 +1,25 @@ +package zipkintracer + +import ( + "math/rand" + "sync" + "time" +) + +var ( + seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano())) + // The golang rand generators are *not* intrinsically thread-safe. 
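// (randomID and randomID2 below therefore take seededIDLock around every draw
// from seededIDGen, which makes them safe to call from concurrent goroutines.)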
+ seededIDLock sync.Mutex +) + +func randomID() uint64 { + seededIDLock.Lock() + defer seededIDLock.Unlock() + return uint64(seededIDGen.Int63()) +} + +func randomID2() (uint64, uint64) { + seededIDLock.Lock() + defer seededIDLock.Unlock() + return uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63()) +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go new file mode 100644 index 0000000..7936499 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go @@ -0,0 +1,65 @@ +package wire + +import ( + "github.com/openzipkin/zipkin-go-opentracing/flag" + "github.com/openzipkin/zipkin-go-opentracing/types" +) + +// ProtobufCarrier is a DelegatingCarrier that uses protocol buffers as +// the underlying data structure. The reason for implementing DelegatingCarrier +// is to allow end users to serialize the underlying protocol buffers using +// jsonpb or any other serialization forms they want. +type ProtobufCarrier TracerState + +// SetState sets the tracer state. +func (p *ProtobufCarrier) SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) { + p.TraceId = traceID.Low + p.TraceIdHigh = traceID.High + p.SpanId = spanID + if parentSpanID == nil { + flags |= flag.IsRoot + p.ParentSpanId = 0 + } else { + flags &^= flag.IsRoot + p.ParentSpanId = *parentSpanID + } + flags |= flag.SamplingSet + if sampled { + flags |= flag.Sampled + p.Sampled = sampled + } else { + flags &^= flag.Sampled + } + p.Flags = uint64(flags) +} + +// State returns the tracer state. +func (p *ProtobufCarrier) State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) { + traceID.Low = p.TraceId + traceID.High = p.TraceIdHigh + spanID = p.SpanId + sampled = p.Sampled + flags = flag.Flags(p.Flags) + if flags&flag.IsRoot == 0 { + parentSpanID = &p.ParentSpanId + } + return traceID, spanID, parentSpanID, sampled, flags +} + +// SetBaggageItem sets a baggage item. +func (p *ProtobufCarrier) SetBaggageItem(key, value string) { + if p.BaggageItems == nil { + p.BaggageItems = map[string]string{key: value} + return + } + + p.BaggageItems[key] = value +} + +// GetBaggage iterates over each baggage item and executes the callback with +// the key:value pair. +func (p *ProtobufCarrier) GetBaggage(f func(k, v string)) { + for k, v := range p.BaggageItems { + f(k, v) + } +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go new file mode 100644 index 0000000..86242a9 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go @@ -0,0 +1,6 @@ +package wire + +//go:generate protoc --gogofaster_out=$GOPATH/src/github.com/openzipkin/zipkin-go-opentracing/wire wire.proto + +// Run `go get github.com/gogo/protobuf/protoc-gen-gogofaster` to install the +// gogofaster generator binary. diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go new file mode 100644 index 0000000..df2c119 --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go @@ -0,0 +1,647 @@ +// Code generated by protoc-gen-gogo. +// source: wire.proto +// DO NOT EDIT! + +/* + Package wire is a generated protocol buffer package.
+ + It is generated from these files: + wire.proto + + It has these top-level messages: + TracerState +*/ +package wire + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type TracerState struct { + TraceId uint64 `protobuf:"fixed64,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + SpanId uint64 `protobuf:"fixed64,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + Sampled bool `protobuf:"varint,3,opt,name=sampled,proto3" json:"sampled,omitempty"` + BaggageItems map[string]string `protobuf:"bytes,4,rep,name=baggage_items,json=baggageItems" json:"baggage_items,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + TraceIdHigh uint64 `protobuf:"fixed64,20,opt,name=trace_id_high,json=traceIdHigh,proto3" json:"trace_id_high,omitempty"` + ParentSpanId uint64 `protobuf:"fixed64,21,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + Flags uint64 `protobuf:"fixed64,22,opt,name=flags,proto3" json:"flags,omitempty"` +} + +func (m *TracerState) Reset() { *m = TracerState{} } +func (m *TracerState) String() string { return proto.CompactTextString(m) } +func (*TracerState) ProtoMessage() {} +func (*TracerState) Descriptor() ([]byte, []int) { return fileDescriptorWire, []int{0} } + +func (m *TracerState) GetTraceId() uint64 { + if m != nil { + return m.TraceId + } + return 0 +} + +func (m *TracerState) GetSpanId() uint64 { + if m != nil { + return m.SpanId + } + return 0 +} + +func (m *TracerState) GetSampled() bool { + if m != nil { + return m.Sampled + } + return false +} + +func (m *TracerState) GetBaggageItems() map[string]string { + if m != nil { + return m.BaggageItems + } + return nil +} + +func (m *TracerState) GetTraceIdHigh() uint64 { + if m != nil { + return m.TraceIdHigh + } + return 0 +} + +func (m *TracerState) GetParentSpanId() uint64 { + if m != nil { + return m.ParentSpanId + } + return 0 +} + +func (m *TracerState) GetFlags() uint64 { + if m != nil { + return m.Flags + } + return 0 +} + +func init() { + proto.RegisterType((*TracerState)(nil), "zipkintracer_go.wire.TracerState") +} +func (m *TracerState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TracerState) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.TraceId != 0 { + dAtA[i] = 0x9 + i++ + i = encodeFixed64Wire(dAtA, i, uint64(m.TraceId)) + } + if m.SpanId != 0 { + dAtA[i] = 0x11 + i++ + i = encodeFixed64Wire(dAtA, i, uint64(m.SpanId)) + } + if m.Sampled { + dAtA[i] = 0x18 + i++ + if m.Sampled { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.BaggageItems) > 0 { + for k, _ := range m.BaggageItems { + dAtA[i] = 0x22 + i++ + v := m.BaggageItems[k] + mapSize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v))) + i = encodeVarintWire(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i 
= encodeVarintWire(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintWire(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + if m.TraceIdHigh != 0 { + dAtA[i] = 0xa1 + i++ + dAtA[i] = 0x1 + i++ + i = encodeFixed64Wire(dAtA, i, uint64(m.TraceIdHigh)) + } + if m.ParentSpanId != 0 { + dAtA[i] = 0xa9 + i++ + dAtA[i] = 0x1 + i++ + i = encodeFixed64Wire(dAtA, i, uint64(m.ParentSpanId)) + } + if m.Flags != 0 { + dAtA[i] = 0xb1 + i++ + dAtA[i] = 0x1 + i++ + i = encodeFixed64Wire(dAtA, i, uint64(m.Flags)) + } + return i, nil +} + +func encodeFixed64Wire(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Wire(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintWire(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *TracerState) Size() (n int) { + var l int + _ = l + if m.TraceId != 0 { + n += 9 + } + if m.SpanId != 0 { + n += 9 + } + if m.Sampled { + n += 2 + } + if len(m.BaggageItems) > 0 { + for k, v := range m.BaggageItems { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v))) + n += mapEntrySize + 1 + sovWire(uint64(mapEntrySize)) + } + } + if m.TraceIdHigh != 0 { + n += 10 + } + if m.ParentSpanId != 0 { + n += 10 + } + if m.Flags != 0 { + n += 10 + } + return n +} + +func sovWire(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozWire(x uint64) (n int) { + return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *TracerState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TracerState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TracerState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType) + } + m.TraceId = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + m.TraceId = uint64(dAtA[iNdEx-8]) + m.TraceId |= uint64(dAtA[iNdEx-7]) << 8 + m.TraceId |= uint64(dAtA[iNdEx-6]) << 16 + m.TraceId |= uint64(dAtA[iNdEx-5]) << 24 + m.TraceId |= uint64(dAtA[iNdEx-4]) << 32 + m.TraceId |= uint64(dAtA[iNdEx-3]) << 40 + m.TraceId |= uint64(dAtA[iNdEx-2]) << 48 + m.TraceId |= uint64(dAtA[iNdEx-1]) << 56 + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType) + } + m.SpanId = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + m.SpanId = uint64(dAtA[iNdEx-8]) + m.SpanId |= uint64(dAtA[iNdEx-7]) << 8 + 
m.SpanId |= uint64(dAtA[iNdEx-6]) << 16 + m.SpanId |= uint64(dAtA[iNdEx-5]) << 24 + m.SpanId |= uint64(dAtA[iNdEx-4]) << 32 + m.SpanId |= uint64(dAtA[iNdEx-3]) << 40 + m.SpanId |= uint64(dAtA[iNdEx-2]) << 48 + m.SpanId |= uint64(dAtA[iNdEx-1]) << 56 + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Sampled", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Sampled = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BaggageItems", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthWire + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + var keykey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + keykey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthWire + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey := string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + if m.BaggageItems == nil { + m.BaggageItems = make(map[string]string) + } + if iNdEx < postIndex { + var valuekey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + valuekey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWire + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthWire + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + m.BaggageItems[mapkey] = mapvalue + } else { + var mapvalue string + m.BaggageItems[mapkey] = mapvalue + } + iNdEx = postIndex + case 20: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field TraceIdHigh", wireType) + } + m.TraceIdHigh = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + m.TraceIdHigh = uint64(dAtA[iNdEx-8]) + m.TraceIdHigh |= uint64(dAtA[iNdEx-7]) << 8 + m.TraceIdHigh |= uint64(dAtA[iNdEx-6]) << 16 + m.TraceIdHigh |= uint64(dAtA[iNdEx-5]) << 24 + m.TraceIdHigh |= uint64(dAtA[iNdEx-4]) << 32 + 
m.TraceIdHigh |= uint64(dAtA[iNdEx-3]) << 40 + m.TraceIdHigh |= uint64(dAtA[iNdEx-2]) << 48 + m.TraceIdHigh |= uint64(dAtA[iNdEx-1]) << 56 + case 21: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType) + } + m.ParentSpanId = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + m.ParentSpanId = uint64(dAtA[iNdEx-8]) + m.ParentSpanId |= uint64(dAtA[iNdEx-7]) << 8 + m.ParentSpanId |= uint64(dAtA[iNdEx-6]) << 16 + m.ParentSpanId |= uint64(dAtA[iNdEx-5]) << 24 + m.ParentSpanId |= uint64(dAtA[iNdEx-4]) << 32 + m.ParentSpanId |= uint64(dAtA[iNdEx-3]) << 40 + m.ParentSpanId |= uint64(dAtA[iNdEx-2]) << 48 + m.ParentSpanId |= uint64(dAtA[iNdEx-1]) << 56 + case 22: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType) + } + m.Flags = 0 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + m.Flags = uint64(dAtA[iNdEx-8]) + m.Flags |= uint64(dAtA[iNdEx-7]) << 8 + m.Flags |= uint64(dAtA[iNdEx-6]) << 16 + m.Flags |= uint64(dAtA[iNdEx-5]) << 24 + m.Flags |= uint64(dAtA[iNdEx-4]) << 32 + m.Flags |= uint64(dAtA[iNdEx-3]) << 40 + m.Flags |= uint64(dAtA[iNdEx-2]) << 48 + m.Flags |= uint64(dAtA[iNdEx-1]) << 56 + default: + iNdEx = preIndex + skippy, err := skipWire(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWire + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWire(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthWire + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWire + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWire(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWire = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("wire.proto", fileDescriptorWire) } + +var 
fileDescriptorWire = []byte{ + // 300 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a, + 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xa9, 0xca, 0x2c, 0xc8, 0xce, 0xcc, 0x2b, 0x29, + 0x4a, 0x4c, 0x4e, 0x2d, 0x8a, 0x4f, 0xcf, 0xd7, 0x03, 0xc9, 0x29, 0x5d, 0x63, 0xe2, 0xe2, 0x0e, + 0x01, 0x0b, 0x05, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x49, 0x72, 0x71, 0x80, 0x55, 0xc4, 0x67, 0xa6, + 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x05, 0xb1, 0x83, 0xf9, 0x9e, 0x29, 0x42, 0xe2, 0x5c, 0xec, + 0xc5, 0x05, 0x89, 0x79, 0x20, 0x19, 0x26, 0xb0, 0x0c, 0x1b, 0x88, 0xeb, 0x99, 0x22, 0x24, 0xc1, + 0xc5, 0x5e, 0x9c, 0x98, 0x5b, 0x90, 0x93, 0x9a, 0x22, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x11, 0x04, + 0xe3, 0x0a, 0x45, 0x70, 0xf1, 0x26, 0x25, 0xa6, 0xa7, 0x27, 0xa6, 0xa7, 0xc6, 0x67, 0x96, 0xa4, + 0xe6, 0x16, 0x4b, 0xb0, 0x28, 0x30, 0x6b, 0x70, 0x1b, 0x19, 0xeb, 0x61, 0x73, 0x8b, 0x1e, 0x92, + 0x3b, 0xf4, 0x9c, 0x20, 0xda, 0x3c, 0x41, 0xba, 0x5c, 0xf3, 0x4a, 0x8a, 0x2a, 0x83, 0x78, 0x92, + 0x90, 0x84, 0x84, 0x94, 0xb8, 0x78, 0x61, 0xee, 0x8c, 0xcf, 0xc8, 0x4c, 0xcf, 0x90, 0x10, 0x01, + 0x3b, 0x89, 0x1b, 0xea, 0x58, 0x8f, 0xcc, 0xf4, 0x0c, 0x21, 0x15, 0x2e, 0xbe, 0x82, 0xc4, 0xa2, + 0xd4, 0xbc, 0x92, 0x78, 0x98, 0xbb, 0x45, 0xc1, 0x8a, 0x78, 0x20, 0xa2, 0xc1, 0x10, 0xd7, 0x8b, + 0x70, 0xb1, 0xa6, 0xe5, 0x24, 0xa6, 0x17, 0x4b, 0x88, 0x81, 0x25, 0x21, 0x1c, 0x29, 0x7b, 0x2e, + 0x41, 0x0c, 0x27, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x82, 0xc3, 0x85, 0x33, 0x08, 0xc4, + 0x04, 0x69, 0x2e, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0x87, 0x08, 0x67, 0x10, 0x84, 0x63, 0xc5, 0x64, + 0xc1, 0xe8, 0x24, 0x76, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, + 0x4e, 0x78, 0x2c, 0xc7, 0x10, 0xc5, 0x02, 0xf2, 0x64, 0x12, 0x1b, 0x38, 0x36, 0x8c, 0x01, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xb5, 0x5e, 0x0d, 0x33, 0x9b, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go new file mode 100644 index 0000000..e06ca4c --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go @@ -0,0 +1,72 @@ +package zipkintracer + +import ( + "encoding/binary" + "net" + "strconv" + "strings" + + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" +) + +// makeEndpoint takes the hostport and service name that represent this Zipkin +// service, and returns an endpoint that's embedded into the Zipkin core Span +// type. It will return a nil endpoint if the input parameters are malformed. 
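// For instance (values illustrative only):
//
//   ep := makeEndpoint("192.168.1.12:9411", "ServiceA")
//   // ep.ServiceName == "ServiceA", ep.Port == 9411, and ep.Ipv4 holds the
//   // big-endian packing of 192.168.1.12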
+func makeEndpoint(hostport, serviceName string) (ep *zipkincore.Endpoint) { + ep = zipkincore.NewEndpoint() + + // Set the ServiceName + ep.ServiceName = serviceName + + if strings.IndexByte(hostport, ':') < 0 { + // "<host>" becomes "<host>:0" + hostport = hostport + ":0" + } + + // try to parse provided "<host>:<port>" + host, port, err := net.SplitHostPort(hostport) + if err != nil { + // if unparsable, return as "undefined:0" + return + } + + // try to set port number + p, _ := strconv.ParseUint(port, 10, 16) + ep.Port = int16(p) + + // if <host> is a domain name, look it up + addrs, err := net.LookupIP(host) + if err != nil { + // return as "undefined:<port>" + return + } + + var addr4, addr16 net.IP + for i := range addrs { + addr := addrs[i].To4() + if addr == nil { + // IPv6 + if addr16 == nil { + addr16 = addrs[i].To16() // IPv6 - 16 bytes + } + } else { + // IPv4 + if addr4 == nil { + addr4 = addr // IPv4 - 4 bytes + } + } + if addr16 != nil && addr4 != nil { + // IPv4 & IPv6 have been set, we can stop looking further + break + } + } + // default to 0 filled 4 byte array for IPv4 if IPv6 only host was found + if addr4 == nil { + addr4 = make([]byte, 4) + } + + // set IPv4 and IPv6 addresses + ep.Ipv4 = (int32)(binary.BigEndian.Uint32(addr4)) + ep.Ipv6 = []byte(addr16) + return +} diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go new file mode 100644 index 0000000..2aee71e --- /dev/null +++ b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go @@ -0,0 +1,287 @@ +package zipkintracer + +import ( + "encoding/binary" + "fmt" + "math" + "net" + "strconv" + "time" + + otext "github.com/opentracing/opentracing-go/ext" + "github.com/opentracing/opentracing-go/log" + + "github.com/openzipkin/zipkin-go-opentracing/flag" + "github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore" +) + +var ( + // SpanKindResource will be regarded as a SA annotation by Zipkin. + SpanKindResource = otext.SpanKindEnum("resource") +) + +// Recorder implements the SpanRecorder interface. +type Recorder struct { + collector Collector + debug bool + endpoint *zipkincore.Endpoint + materializer func(logFields []log.Field) ([]byte, error) +} + +// RecorderOption allows for functional options. +type RecorderOption func(r *Recorder) + +// WithLogFmtMaterializer will convert OpenTracing Log fields to a LogFmt representation. +func WithLogFmtMaterializer() RecorderOption { + return func(r *Recorder) { + r.materializer = MaterializeWithLogFmt + } +} + +// WithJSONMaterializer will convert OpenTracing Log fields to a JSON representation. +func WithJSONMaterializer() RecorderOption { + return func(r *Recorder) { + r.materializer = MaterializeWithJSON + } +} + +// WithStrictMaterializer will only record event Log fields and discard the rest. +func WithStrictMaterializer() RecorderOption { + return func(r *Recorder) { + r.materializer = StrictZipkinMaterializer + } +} + +// NewRecorder creates a new Zipkin Recorder backed by the provided Collector. +// +// hostPort and serviceName allow you to set the default Zipkin endpoint +// information which will be added to the application's standard core +// annotations. hostPort will be resolved into an IPv4 and/or IPv6 address and +// Port number, serviceName will be used as the application's service +// identifier.
+// +// If the application does not listen for incoming requests, or an endpoint +// context does not involve a network address and/or port, these cases can be +// handled like this: +// # port is not applicable: +// NewRecorder(c, debug, "192.168.1.12:0", "ServiceA") +// +// # network address and port are not applicable: +// NewRecorder(c, debug, "0.0.0.0:0", "ServiceB") +func NewRecorder(c Collector, debug bool, hostPort, serviceName string, options ...RecorderOption) SpanRecorder { + r := &Recorder{ + collector: c, + debug: debug, + endpoint: makeEndpoint(hostPort, serviceName), + materializer: MaterializeWithLogFmt, + } + for _, opts := range options { + opts(r) + } + return r +} + +// RecordSpan converts a RawSpan into the Zipkin representation of a span +// and records it to the underlying collector. +func (r *Recorder) RecordSpan(sp RawSpan) { + if !sp.Context.Sampled { + return + } + + var parentSpanID *int64 + if sp.Context.ParentSpanID != nil { + id := int64(*sp.Context.ParentSpanID) + parentSpanID = &id + } + + var traceIDHigh *int64 + if sp.Context.TraceID.High > 0 { + tidh := int64(sp.Context.TraceID.High) + traceIDHigh = &tidh + } + + span := &zipkincore.Span{ + Name: sp.Operation, + ID: int64(sp.Context.SpanID), + TraceID: int64(sp.Context.TraceID.Low), + TraceIDHigh: traceIDHigh, + ParentID: parentSpanID, + Debug: r.debug || (sp.Context.Flags&flag.Debug == flag.Debug), + } + // only send timestamp and duration if this process owns the current span. + if sp.Context.Owner { + timestamp := sp.Start.UnixNano() / 1e3 + duration := sp.Duration.Nanoseconds() / 1e3 + // since we always time our spans we will round up to 1 microsecond if the + // span took less. + if duration == 0 { + duration = 1 + } + span.Timestamp = &timestamp + span.Duration = &duration + } + if kind, ok := sp.Tags[string(otext.SpanKind)]; ok { + switch kind { + case otext.SpanKindRPCClient, otext.SpanKindRPCClientEnum: + annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint) + annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint) + case otext.SpanKindRPCServer, otext.SpanKindRPCServerEnum: + annotate(span, sp.Start, zipkincore.SERVER_RECV, r.endpoint) + annotate(span, sp.Start.Add(sp.Duration), zipkincore.SERVER_SEND, r.endpoint) + case SpanKindResource: + serviceName, ok := sp.Tags[string(otext.PeerService)] + if !ok { + serviceName = r.endpoint.GetServiceName() + } + host, ok := sp.Tags[string(otext.PeerHostname)].(string) + if !ok { + if r.endpoint.GetIpv4() > 0 { + ip := make([]byte, 4) + binary.BigEndian.PutUint32(ip, uint32(r.endpoint.GetIpv4())) + host = net.IP(ip).To4().String() + } else { + ip := r.endpoint.GetIpv6() + host = net.IP(ip).String() + } + } + var sPort string + port, ok := sp.Tags[string(otext.PeerPort)] + if !ok { + sPort = strconv.FormatInt(int64(r.endpoint.GetPort()), 10) + } else { + sPort = strconv.FormatInt(int64(port.(uint16)), 10) + } + re := makeEndpoint(net.JoinHostPort(host, sPort), serviceName.(string)) + if re != nil { + annotateBinary(span, zipkincore.SERVER_ADDR, serviceName, re) + } else { + fmt.Printf("endpoint creation failed: host: %q port: %q", host, sPort) + } + annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint) + annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint) + default: + annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint) + } + } else { + annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint) + } + + for key, value := range sp.Tags {
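// each remaining OpenTracing tag becomes a Zipkin binary annotation keyed by
// the tag name; the value encoding is chosen by annotateBinary below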
annotateBinary(span, key, value, r.endpoint) + } + + for _, spLog := range sp.Logs { + if len(spLog.Fields) == 1 && spLog.Fields[0].Key() == "event" { + // proper Zipkin annotation + annotate(span, spLog.Timestamp, fmt.Sprintf("%+v", spLog.Fields[0].Value()), r.endpoint) + continue + } + // OpenTracing Log with key-value pair(s). Try to materialize using the + // materializer chosen for the recorder. + if logs, err := r.materializer(spLog.Fields); err != nil { + fmt.Printf("Materialization of OpenTracing LogFields failed: %+v", err) + } else { + annotate(span, spLog.Timestamp, string(logs), r.endpoint) + } + } + _ = r.collector.Collect(span) +} + +// annotate annotates the span with the given value. +func annotate(span *zipkincore.Span, timestamp time.Time, value string, host *zipkincore.Endpoint) { + if timestamp.IsZero() { + timestamp = time.Now() + } + span.Annotations = append(span.Annotations, &zipkincore.Annotation{ + Timestamp: timestamp.UnixNano() / 1e3, + Value: value, + Host: host, + }) +} + +// annotateBinary annotates the span with a key and a value that will be []byte +// encoded. +func annotateBinary(span *zipkincore.Span, key string, value interface{}, host *zipkincore.Endpoint) { + var a zipkincore.AnnotationType + var b []byte + // We are not using zipkincore.AnnotationType_I16 for types that could fit + // as reporting on it seems to be broken on the zipkin web interface + // (however, we can properly extract the number from zipkin storage + // directly). int64 has issues with negative numbers but seems ok for + // positive numbers needing more than 32 bit. + switch v := value.(type) { + case bool: + a = zipkincore.AnnotationType_BOOL + b = []byte("\x00") + if v { + b = []byte("\x01") + } + case []byte: + a = zipkincore.AnnotationType_BYTES + b = v + case byte: + a = zipkincore.AnnotationType_I32 + b = make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(v)) + case int8: + a = zipkincore.AnnotationType_I32 + b = make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(v)) + case int16: + a = zipkincore.AnnotationType_I32 + b = make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(v)) + case uint16: + a = zipkincore.AnnotationType_I32 + b = make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(v)) + case int32: + a = zipkincore.AnnotationType_I32 + b = make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(v)) + case uint32: + a = zipkincore.AnnotationType_I32 + b = make([]byte, 4) + binary.BigEndian.PutUint32(b, v) + case int64: + a = zipkincore.AnnotationType_I64 + b = make([]byte, 8) + binary.BigEndian.PutUint64(b, uint64(v)) + case int: + a = zipkincore.AnnotationType_I32 + b = make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(v)) + case uint: + a = zipkincore.AnnotationType_I32 + b = make([]byte, 4) + binary.BigEndian.PutUint32(b, uint32(v)) + case uint64: + a = zipkincore.AnnotationType_I64 + b = make([]byte, 8) + binary.BigEndian.PutUint64(b, v) + case float32: + a = zipkincore.AnnotationType_DOUBLE + b = make([]byte, 8) + bits := math.Float64bits(float64(v)) + binary.BigEndian.PutUint64(b, bits) + case float64: + a = zipkincore.AnnotationType_DOUBLE + b = make([]byte, 8) + bits := math.Float64bits(v) + binary.BigEndian.PutUint64(b, bits) + case string: + a = zipkincore.AnnotationType_STRING + b = []byte(v) + default: + // we have no handler for the value's type, but let's get a string + // representation of it.
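// (for the typed cases above: bool true becomes the single byte 0x01 with
// AnnotationType_BOOL, int32(3) becomes the big-endian bytes 00 00 00 03 with
// AnnotationType_I32, and a float64 is stored as its IEEE 754 bit pattern with
// AnnotationType_DOUBLE)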
+ a = zipkincore.AnnotationType_STRING + b = []byte(fmt.Sprintf("%+v", value)) + } + span.BinaryAnnotations = append(span.BinaryAnnotations, &zipkincore.BinaryAnnotation{ + Key: key, + Value: b, + AnnotationType: a, + Host: host, + }) +} diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go new file mode 100644 index 0000000..145eec2 --- /dev/null +++ b/vendor/github.com/pierrec/lz4/block.go @@ -0,0 +1,445 @@ +package lz4 + +import ( + "encoding/binary" + "errors" +) + +// block represents a frame data block. +// Used when compressing or decompressing frame blocks concurrently. +type block struct { + compressed bool + zdata []byte // compressed data + data []byte // decompressed data + offset int // offset within the data as with block dependency the 64Kb window is prepended to it + checksum uint32 // compressed data checksum + err error // error while [de]compressing +} + +var ( + // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted. + ErrInvalidSource = errors.New("lz4: invalid source") + // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when + // the supplied buffer for [de]compression is too small. + ErrShortBuffer = errors.New("lz4: short buffer") +) + +// CompressBlockBound returns the maximum compressed size of a buffer of size n +// when the data is not compressible. +func CompressBlockBound(n int) int { + return n + n/255 + 16 +} + +// UncompressBlock decompresses the source buffer into the destination one, +// starting at the di index and returning the decompressed size. +// +// The destination buffer must be sized appropriately. +// +// An error is returned if the source data is invalid or the destination buffer is too small. +func UncompressBlock(src, dst []byte, di int) (int, error) { + si, sn, di0 := 0, len(src), di + if sn == 0 { + return 0, nil + } + + for { + // literals and match lengths (token) + lLen := int(src[si] >> 4) + mLen := int(src[si] & 0xF) + if si++; si == sn { + return di, ErrInvalidSource + } + + // literals + if lLen > 0 { + if lLen == 0xF { + for src[si] == 0xFF { + lLen += 0xFF + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + lLen += int(src[si]) + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + if len(dst)-di < lLen || si+lLen > sn { + return di - di0, ErrShortBuffer + } + di += copy(dst[di:], src[si:si+lLen]) + + if si += lLen; si >= sn { + return di - di0, nil + } + } + + if si += 2; si >= sn { + return di, ErrInvalidSource + } + offset := int(src[si-2]) | int(src[si-1])<<8 + if di-offset < 0 || offset == 0 { + return di - di0, ErrInvalidSource + } + + // match + if mLen == 0xF { + for src[si] == 0xFF { + mLen += 0xFF + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + mLen += int(src[si]) + if si++; si == sn { + return di - di0, ErrInvalidSource + } + } + // minimum match length is 4 + mLen += 4 + if len(dst)-di <= mLen { + return di - di0, ErrShortBuffer + } + + // copy the match (NB. match is at least 4 bytes long) + // NB. past di, copy() would write old bytes instead of + // the ones we just copied, so split the work into the largest chunk. + for ; mLen >= offset; mLen -= offset { + di += copy(dst[di:], dst[di-offset:di]) + } + di += copy(dst[di:], dst[di-offset:di-offset+mLen]) + } +} + +// CompressBlock compresses the source buffer starting at soffset into the destination one. +// This is the fast version of LZ4 compression and also the default one. +// +// The size of the compressed data is returned.
If it is 0 and no error, then the data is incompressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlock(src, dst []byte, soffset int) (int, error) { + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 || soffset >= sn { + return 0, nil + } + var si, di int + + // fast scan strategy: + // we only need a hash table to store the last sequences (4 bytes) + var hashTable [1 << hashLog]int + var hashShift = uint((minMatch * 8) - hashLog) + + // Initialise the hash table with the first 64Kb of the input buffer + // (used when compressing dependent blocks) + for si < soffset { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + si++ + hashTable[h] = si + } + + anchor := si + fma := 1 << skipStrength + for si < sn-minMatch { + // hash the next 4 bytes (sequence)... + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + // -1 to separate existing entries from new ones + ref := hashTable[h] - 1 + // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving) + hashTable[h] = si + 1 + // no need to check the last 3 bytes in the first literal 4 bytes as + // this guarantees that the next match, if any, is compressed with + // a lower size, since to have some compression we must have: + // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size) + // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap + // and by definition we do have: + // ll >= 1, ml >= 4 + // => ll+ml >= 5 + // => so overlap must be 0 + + // the sequence is new, out of bound (64kb) or not valid: try next sequence + if ref < 0 || fma&(1<<skipStrength-1) > 0 || + (si-ref)>>winSizeLog > 0 || + src[ref] != src[si] || + src[ref+1] != src[si+1] || + src[ref+2] != src[si+2] || + src[ref+3] != src[si+3] { + // variable step: improves performance on non-compressible data + si += fma >> skipStrength + fma++ + continue + } + // match found + fma = 1 << skipStrength + lLen := si - anchor + offset := si - ref + + // encode match length part 1 + si += minMatch + mLen := si // match length has minMatch already + for si <= sn && src[si] == src[si-offset] { + si++ + } + mLen = si - mLen + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // encode literals length + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(l) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // literals + if di+lLen >= dn { + return di, ErrShortBuffer + } + di += copy(dst[di:], src[anchor:anchor+lLen]) + anchor = si + + // encode offset + if di += 2; di >= dn { + return di, ErrShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // encode match length part 2 + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(mLen) + if di++; di == dn { + return di, ErrShortBuffer + } + } + } + + if anchor == 0 { + // incompressible + return 0, nil + } + + // last literals + lLen := len(src) - anchor + if lLen < 0xF { + dst[di] = byte(lLen << 4) + } else { + dst[di] = 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + lLen -= 0xF + for ; lLen >= 0xFF; lLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(lLen) + } + if di++; di == dn { + return di,
ErrShortBuffer + } + + // write literals + src = src[anchor:] + switch n := di + len(src); { + case n > dn: + return di, ErrShortBuffer + case n >= sn: + // incompressible + return 0, nil + } + di += copy(dst[di:], src) + return di, nil +} + +// CompressBlockHC compresses the source buffer starting at soffet into the destination one. +// CompressBlockHC compression ratio is better than CompressBlock but it is also slower. +// +// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible. +// +// An error is returned if the destination buffer is too small. +func CompressBlockHC(src, dst []byte, soffset int) (int, error) { + sn, dn := len(src)-mfLimit, len(dst) + if sn <= 0 || dn == 0 || soffset >= sn { + return 0, nil + } + var si, di int + + // Hash Chain strategy: + // we need a hash table and a chain table + // the chain table cannot contain more entries than the window size (64Kb entries) + var hashTable [1 << hashLog]int + var chainTable [winSize]int + var hashShift = uint((minMatch * 8) - hashLog) + + // Initialise the hash table with the first 64Kb of the input buffer + // (used when compressing dependent blocks) + for si < soffset { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + chainTable[si&winMask] = hashTable[h] + si++ + hashTable[h] = si + } + + anchor := si + for si < sn-minMatch { + // hash the next 4 bytes (sequence)... + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + + // follow the chain until out of window and give the longest match + mLen := 0 + offset := 0 + for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 { + // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length + if src[next+mLen] == src[si+mLen] { + for ml := 0; ; ml++ { + if src[next+ml] != src[si+ml] || si+ml > sn { + // found a longer match, keep its position and length + if mLen < ml && ml >= minMatch { + mLen = ml + offset = si - next + } + break + } + } + } + } + chainTable[si&winMask] = hashTable[h] + hashTable[h] = si + 1 + + // no match found + if mLen == 0 { + si++ + continue + } + + // match found + // update hash/chain tables with overlaping bytes: + // si already hashed, add everything from si+1 up to the match length + for si, ml := si+1, si+mLen; si < ml; { + h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift + chainTable[si&winMask] = hashTable[h] + si++ + hashTable[h] = si + } + + lLen := si - anchor + si += mLen + mLen -= minMatch // match length does not include minMatch + + if mLen < 0xF { + dst[di] = byte(mLen) + } else { + dst[di] = 0xF + } + + // encode literals length + if lLen < 0xF { + dst[di] |= byte(lLen << 4) + } else { + dst[di] |= 0xF0 + if di++; di == dn { + return di, ErrShortBuffer + } + l := lLen - 0xF + for ; l >= 0xFF; l -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(l) + } + if di++; di == dn { + return di, ErrShortBuffer + } + + // literals + if di+lLen >= dn { + return di, ErrShortBuffer + } + di += copy(dst[di:], src[anchor:anchor+lLen]) + anchor = si + + // encode offset + if di += 2; di >= dn { + return di, ErrShortBuffer + } + dst[di-2], dst[di-1] = byte(offset), byte(offset>>8) + + // encode match length part 2 + if mLen >= 0xF { + for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF { + dst[di] = 0xFF + if di++; di == dn { + return di, ErrShortBuffer + } + } + dst[di] = byte(mLen) + if di++; di == dn { + return di, 
+			}
+		}
+	}
+
+	if anchor == 0 {
+		// incompressible
+		return 0, nil
+	}
+
+	// last literals
+	lLen := len(src) - anchor
+	if lLen < 0xF {
+		dst[di] = byte(lLen << 4)
+	} else {
+		dst[di] = 0xF0
+		if di++; di == dn {
+			return di, ErrShortBuffer
+		}
+		lLen -= 0xF
+		for ; lLen >= 0xFF; lLen -= 0xFF {
+			dst[di] = 0xFF
+			if di++; di == dn {
+				return di, ErrShortBuffer
+			}
+		}
+		dst[di] = byte(lLen)
+	}
+	if di++; di == dn {
+		return di, ErrShortBuffer
+	}
+
+	// write literals
+	src = src[anchor:]
+	switch n := di + len(src); {
+	case n > dn:
+		return di, ErrShortBuffer
+	case n >= sn:
+		// incompressible
+		return 0, nil
+	}
+	di += copy(dst[di:], src)
+	return di, nil
+}
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
new file mode 100644
index 0000000..ddb82f6
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/lz4.go
@@ -0,0 +1,105 @@
+// Package lz4 implements reading and writing lz4 compressed data (a frame),
+// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
+// using an io.Reader (decompression) and io.Writer (compression).
+// It is designed to minimize memory usage while maximizing throughput by being able to
+// [de]compress data concurrently.
+//
+// The Reader and the Writer support concurrent processing provided the supplied buffers are
+// large enough (in multiples of BlockMaxSize) and there is no block dependency.
+// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
+// The runtime.GOMAXPROCS() value is used to apply concurrency or not.
+//
+// Although the block level compression and decompression functions are exposed and are fully compatible
+// with the lz4 block format definition, they are low level and should not be used directly.
+// For a complete description of an lz4 compressed block, see:
+// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
+//
+// See https://github.com/Cyan4973/lz4 for the reference C implementation.
+package lz4
+
+import (
+	"hash"
+	"sync"
+
+	"github.com/pierrec/xxHash/xxHash32"
+)
+
+const (
+	// Extension is the LZ4 frame file name extension
+	Extension = ".lz4"
+	// Version is the LZ4 frame format version
+	Version = 1
+
+	frameMagic     = uint32(0x184D2204)
+	frameSkipMagic = uint32(0x184D2A50)
+
+	// The following constants are used to setup the compression algorithm.
+	minMatch   = 4  // the minimum length of a match sequence (4 bytes)
+	winSizeLog = 16 // LZ4 64Kb window size limit
+	winSize    = 1 << winSizeLog
+	winMask    = winSize - 1 // 64Kb window of previous data for dependent blocks
+
+	// hashLog determines the size of the hash table used to quickly find a previous match position.
+	// Its value influences the compression speed and memory usage, the lower the faster,
+	// but at the expense of the compression ratio.
+	// 16 seems to be the best compromise.
+	hashLog       = 16
+	hashTableSize = 1 << hashLog
+	hashShift     = uint((minMatch * 8) - hashLog)
+
+	mfLimit      = 8 + minMatch // The last match cannot start within the last 12 bytes.
+	skipStrength = 6            // variable step for fast scan
+
+	hasher = uint32(2654435761) // prime number used to hash minMatch
+)
+
+// bsMapID maps the block max size id to its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
+var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20}
+
+// bsMapValue is the inverse mapping (size in bytes to id), built by init below.
+var bsMapValue = map[int]byte{}
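// Editor's sketch (not part of the vendored file): a minimal round-trip
// through the low-level block API defined in block.go above, assuming a
// hypothetical payload and no block dependency (both offsets are 0):
//
//	data := bytes.Repeat([]byte("hello lz4 "), 100)
//	zbuf := make([]byte, lz4.CompressBlockBound(len(data)))
//	n, _ := lz4.CompressBlock(data, zbuf, 0) // n == 0 with a nil error means incompressible
//	out := make([]byte, len(data))
//	m, _ := lz4.UncompressBlock(zbuf[:n], out, 0)
//	// on success, m == len(data) and out[:m] matches data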
+func init() {
+	for i, v := range bsMapID {
+		bsMapValue[v] = i
+	}
+}
+
+// Header describes the various flags that can be set on a Writer or obtained from a Reader.
+// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
+//
+// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
+// It is the caller's responsibility to check them if necessary (typically when using the Reader concurrency).
+type Header struct {
+	BlockDependency bool   // compressed blocks are dependent (one block depends on the last 64Kb of the previous one)
+	BlockChecksum   bool   // compressed blocks are checksummed
+	NoChecksum      bool   // disable the frame checksum
+	BlockMaxSize    int    // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
+	Size            uint64 // the frame total size. It is _not_ computed by the Writer.
+	HighCompression bool   // use high compression (only for the Writer)
+	done            bool   // whether the descriptor was processed (Read or Write and checked)
+	// Removed as not supported
+	// Dict bool // a dictionary id is to be used
+	// DictID uint32 // the dictionary id read from the frame, if any.
+}
+
+// xxhPool wraps the standard pool for xxHash items.
+// Putting items back in the pool automatically resets them.
+type xxhPool struct {
+	sync.Pool
+}
+
+func (p *xxhPool) Get() hash.Hash32 {
+	return p.Pool.Get().(hash.Hash32)
+}
+
+func (p *xxhPool) Put(h hash.Hash32) {
+	h.Reset()
+	p.Pool.Put(h)
+}
+
+// hashPool is used by readers and writers and contains xxHash items.
+var hashPool = xxhPool{
+	Pool: sync.Pool{
+		New: func() interface{} { return xxHash32.New(0) },
+	},
+}
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
new file mode 100644
index 0000000..9f7fd60
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/reader.go
@@ -0,0 +1,364 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"errors"
+	"fmt"
+	"hash"
+	"io"
+	"io/ioutil"
+	"runtime"
+	"sync"
+	"sync/atomic"
+)
+
+// ErrInvalid is returned when the data being read is not an LZ4 archive
+// (LZ4 magic number detection failed).
+var ErrInvalid = errors.New("invalid lz4 data")
+
+// errEndOfBlock is returned by readBlock when it has reached the last block of the frame.
+// It is not an error.
+var errEndOfBlock = errors.New("end of block")
+
+// Reader implements the LZ4 frame decoder.
+// The Header is set after the first call to Read().
+// The Header may change between Read() calls in case of concatenated frames.
+type Reader struct {
+	Pos int64 // position within the source
+	Header
+	src      io.Reader
+	checksum hash.Hash32    // frame hash
+	wg       sync.WaitGroup // decompressing go routine wait group
+	data     []byte         // buffered decompressed data
+	window   []byte         // 64Kb decompressed data window
+}
+
+// NewReader returns a new LZ4 frame decoder.
+// No access to the underlying io.Reader is performed.
+func NewReader(src io.Reader) *Reader {
+	return &Reader{
+		src:      src,
+		checksum: hashPool.Get(),
+	}
+}
+
+// readHeader checks the frame magic number and parses the frame descriptor.
+// Skippable frames are supported even as a first frame, although the LZ4
+// specification recommends that skippable frames not be used as first frames.
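// Editor's note (not part of the vendored file): per the frame specification
// linked in lz4.go, a skippable frame is laid out as
//
//	[4 bytes: magic 0x184D2A50..0x184D2A5F][4 bytes: little-endian size n][n bytes: skipped]
//
// which is why the comparison below keeps only the top three bytes (magic>>8),
// covering the whole 0x184D2A5X range with one test.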
+func (z *Reader) readHeader(first bool) error { + defer z.checksum.Reset() + + for { + var magic uint32 + if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil { + if !first && err == io.ErrUnexpectedEOF { + return io.EOF + } + return err + } + z.Pos += 4 + if magic>>8 == frameSkipMagic>>8 { + var skipSize uint32 + if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil { + return err + } + z.Pos += 4 + m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize)) + z.Pos += m + if err != nil { + return err + } + continue + } + if magic != frameMagic { + return ErrInvalid + } + break + } + + // header + var buf [8]byte + if _, err := io.ReadFull(z.src, buf[:2]); err != nil { + return err + } + z.Pos += 2 + + b := buf[0] + if b>>6 != Version { + return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version) + } + z.BlockDependency = b>>5&1 == 0 + z.BlockChecksum = b>>4&1 > 0 + frameSize := b>>3&1 > 0 + z.NoChecksum = b>>2&1 == 0 + // z.Dict = b&1 > 0 + + bmsID := buf[1] >> 4 & 0x7 + bSize, ok := bsMapID[bmsID] + if !ok { + return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID) + } + z.BlockMaxSize = bSize + + z.checksum.Write(buf[0:2]) + + if frameSize { + if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil { + return err + } + z.Pos += 8 + binary.LittleEndian.PutUint64(buf[:], z.Size) + z.checksum.Write(buf[0:8]) + } + + // if z.Dict { + // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil { + // return err + // } + // z.Pos += 4 + // binary.LittleEndian.PutUint32(buf[:], z.DictID) + // z.checksum.Write(buf[0:4]) + // } + + // header checksum + if _, err := io.ReadFull(z.src, buf[:1]); err != nil { + return err + } + z.Pos++ + if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] { + return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h) + } + + z.Header.done = true + + return nil +} + +// Read decompresses data from the underlying source into the supplied buffer. +// +// Since there can be multiple streams concatenated, Header values may +// change between calls to Read(). If that is the case, no data is actually read from +// the underlying io.Reader, to allow for potential input buffer resizing. +// +// Data is buffered if the input buffer is too small, and exhausted upon successive calls. +// +// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is +// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value. +func (z *Reader) Read(buf []byte) (n int, err error) { + if !z.Header.done { + if err = z.readHeader(true); err != nil { + return + } + } + + if len(buf) == 0 { + return + } + + // exhaust remaining data from previous Read() + if len(z.data) > 0 { + n = copy(buf, z.data) + z.data = z.data[n:] + if len(z.data) == 0 { + z.data = nil + } + return + } + + // Break up the input buffer into BlockMaxSize blocks with at least one block. + // Then decompress into each of them concurrently if possible (no dependency). + // In case of dependency, the first block will be missing the window (except on the + // very first call), the rest will have it already since it comes from the previous block. 
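	// Editor's illustration: with BlockMaxSize = 256Kb and a 1Mb destination
	// buffer, zn below works out to (1<<20 + 1<<18 - 1) / (1<<18) = 4 blocks,
	// which are read sequentially but, absent block dependency, decompressed
	// concurrently by four goroutines.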
+	wbuf := buf
+	zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize
+	zblocks := make([]block, zn)
+	for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ {
+		zb := &zblocks[zi]
+		// last block may be too small
+		if len(wbuf) < z.BlockMaxSize+len(z.window) {
+			wbuf = make([]byte, z.BlockMaxSize+len(z.window))
+		}
+		copy(wbuf, z.window)
+		if zb.err = z.readBlock(wbuf, zb); zb.err != nil {
+			break
+		}
+		wbuf = wbuf[z.BlockMaxSize:]
+		if !z.BlockDependency {
+			z.wg.Add(1)
+			go z.decompressBlock(zb, &abort)
+			continue
+		}
+		// cannot decompress concurrently when dealing with block dependency
+		z.decompressBlock(zb, nil)
+		// the last block may not contain enough data
+		if len(z.window) == 0 {
+			z.window = make([]byte, winSize)
+		}
+		if len(zb.data) >= winSize {
+			copy(z.window, zb.data[len(zb.data)-winSize:])
+		} else {
+			copy(z.window, z.window[len(zb.data):])
+			copy(z.window[winSize-len(zb.data):], zb.data)
+		}
+	}
+	z.wg.Wait()
+
+	// since a block size may be less than BlockMaxSize, trim the decompressed buffers
+	for _, zb := range zblocks {
+		if zb.err != nil {
+			if zb.err == errEndOfBlock {
+				return n, z.close()
+			}
+			return n, zb.err
+		}
+		bLen := len(zb.data)
+		if !z.NoChecksum {
+			z.checksum.Write(zb.data)
+		}
+		m := copy(buf[n:], zb.data)
+		// buffer the remaining data (this is necessarily the last block)
+		if m < bLen {
+			z.data = zb.data[m:]
+		}
+		n += m
+	}
+
+	return
+}
+
+// readBlock reads an entire block from the frame.
+// The input buffer is the one that will receive the decompressed data.
+// If the end of the frame is detected, it returns the errEndOfBlock error.
+func (z *Reader) readBlock(buf []byte, b *block) error {
+	var bLen uint32
+	if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil {
+		return err
+	}
+	atomic.AddInt64(&z.Pos, 4)
+
+	switch {
+	case bLen == 0:
+		return errEndOfBlock
+	case bLen&(1<<31) == 0:
+		b.compressed = true
+		b.data = buf
+		b.zdata = make([]byte, bLen)
+	default:
+		bLen = bLen & (1<<31 - 1)
+		if int(bLen) > len(buf) {
+			return fmt.Errorf("lz4.Read: invalid block size: %d", bLen)
+		}
+		b.data = buf[:bLen]
+		b.zdata = buf[:bLen]
+	}
+	if _, err := io.ReadFull(z.src, b.zdata); err != nil {
+		return err
+	}
+
+	if z.BlockChecksum {
+		if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil {
+			return err
+		}
+		xxh := hashPool.Get()
+		defer hashPool.Put(xxh)
+		xxh.Write(b.zdata)
+		if h := xxh.Sum32(); h != b.checksum {
+			return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum)
+		}
+	}
+
+	return nil
+}
+
+// decompressBlock decompresses a frame block.
+// In case of an error, the block err is set with it and abort is set to 1.
+func (z *Reader) decompressBlock(b *block, abort *uint32) {
+	if abort != nil {
+		defer z.wg.Done()
+	}
+	if b.compressed {
+		n := len(z.window)
+		m, err := UncompressBlock(b.zdata, b.data, n)
+		if err != nil {
+			if abort != nil {
+				atomic.StoreUint32(abort, 1)
+			}
+			b.err = err
+			return
+		}
+		b.data = b.data[n : n+m]
+	}
+	atomic.AddInt64(&z.Pos, int64(len(b.data)))
+}
+
+// close validates the frame checksum (if any) and checks the next frame (if any).
+func (z *Reader) close() error {
+	if !z.NoChecksum {
+		var checksum uint32
+		if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil {
+			return err
+		}
+		if checksum != z.checksum.Sum32() {
+			return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum)
+		}
+	}
+
+	// get ready for the next concatenated frame, but do not change the position
+	pos := z.Pos
+	z.Reset(z.src)
+	z.Pos = pos
+
+	// since multiple frames can be concatenated, check for another one
+	return z.readHeader(false)
+}
+
+// Reset discards the Reader's state and makes it equivalent to the
+// result of its original state from NewReader, but reading from r instead.
+// This permits reusing a Reader rather than allocating a new one.
+func (z *Reader) Reset(r io.Reader) {
+	z.Header = Header{}
+	z.Pos = 0
+	z.src = r
+	z.checksum.Reset()
+	z.data = nil
+	z.window = nil
+}
+
+// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer.
+// Returns the number of bytes written.
+func (z *Reader) WriteTo(w io.Writer) (n int64, err error) {
+	cpus := runtime.GOMAXPROCS(0)
+	var buf []byte
+
+	// The initial buffer being nil, the first Read will only read the compressed frame options.
+	// The buffer can then be sized appropriately to support maximum concurrency decompression.
+	// If multiple frames are concatenated, Read() will return with no data decompressed but with
+	// potentially changed options. The buffer will be resized accordingly, always trying to
+	// maximize concurrency.
+	for {
+		nsize := 0
+		// the block max size can change if multiple streams are concatenated.
+		// Check it after every Read().
+		if z.BlockDependency {
+			// in case of dependency, we cannot decompress concurrently,
+			// so allocate the minimum buffer + window size
+			nsize = len(z.window) + z.BlockMaxSize
+		} else {
+			// if no dependency, allocate a buffer large enough for concurrent decompression
+			nsize = cpus * z.BlockMaxSize
+		}
+		if nsize != len(buf) {
+			buf = make([]byte, nsize)
+		}
+
+		m, er := z.Read(buf)
+		if er != nil && er != io.EOF {
+			return n, er
+		}
+		m, err = w.Write(buf[:m])
+		n += int64(m)
+		if err != nil || er == io.EOF {
+			return
+		}
+	}
+}
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
new file mode 100644
index 0000000..b1b712f
--- /dev/null
+++ b/vendor/github.com/pierrec/lz4/writer.go
@@ -0,0 +1,377 @@
+package lz4
+
+import (
+	"encoding/binary"
+	"fmt"
+	"hash"
+	"io"
+	"runtime"
+)
+
+// Writer implements the LZ4 frame encoder.
+type Writer struct {
+	Header
+	dst      io.Writer
+	checksum hash.Hash32 // frame checksum
+	data     []byte      // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with
+	window   []byte      // last 64KB of decompressed data (block dependency)
+
+	zbCompressBuf []byte // buffer for compressing lz4 blocks
+	writeSizeBuf  []byte // four-byte slice for writing checksums and sizes in writeBlock
+}
+
+// NewWriter returns a new LZ4 frame encoder.
+// No access to the underlying io.Writer is performed.
+// The supplied Header is checked at the first Write.
+// It is OK to change it before the first Write, but not afterwards until a Reset() is performed.
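// Editor's sketch (not part of the vendored file): typical frame-level usage,
// pairing this Writer with the Reader above; error handling elided.
//
//	var zbuf bytes.Buffer
//	zw := lz4.NewWriter(&zbuf)
//	zw.Write(payload)
//	zw.Close() // writes the end-of-frame marker and the frame checksum
//
//	plain, _ := ioutil.ReadAll(lz4.NewReader(&zbuf))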
+func NewWriter(dst io.Writer) *Writer {
+	return &Writer{
+		dst:      dst,
+		checksum: hashPool.Get(),
+		Header: Header{
+			BlockMaxSize: 4 << 20,
+		},
+		writeSizeBuf: make([]byte, 4),
+	}
+}
+
+// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
+func (z *Writer) writeHeader() error {
+	// Default to 4Mb if BlockMaxSize is not set
+	if z.Header.BlockMaxSize == 0 {
+		z.Header.BlockMaxSize = 4 << 20
+	}
+	// the only option that needs to be validated
+	bSize, ok := bsMapValue[z.Header.BlockMaxSize]
+	if !ok {
+		return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize)
+	}
+
+	// magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
+	// Size and DictID are optional
+	var buf [19]byte
+
+	// set the fixed size data: magic number, block max size and flags
+	binary.LittleEndian.PutUint32(buf[0:], frameMagic)
+	flg := byte(Version << 6)
+	if !z.Header.BlockDependency {
+		flg |= 1 << 5
+	}
+	if z.Header.BlockChecksum {
+		flg |= 1 << 4
+	}
+	if z.Header.Size > 0 {
+		flg |= 1 << 3
+	}
+	if !z.Header.NoChecksum {
+		flg |= 1 << 2
+	}
+	// if z.Header.Dict {
+	// 	flg |= 1
+	// }
+	buf[4] = flg
+	buf[5] = bSize << 4
+
+	// current buffer size: magic(4) + flags(1) + block max size (1)
+	n := 6
+	// optional items
+	if z.Header.Size > 0 {
+		binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
+		n += 8
+	}
+	// if z.Header.Dict {
+	// 	binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID)
+	// 	n += 4
+	// }
+
+	// header checksum includes the flags, block max size and optional Size and DictID
+	z.checksum.Write(buf[4:n])
+	buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF)
+	z.checksum.Reset()
+
+	// header ready, write it out
+	if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
+		return err
+	}
+	z.Header.done = true
+
+	// initialize buffers dependent on header info
+	z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize)
+
+	return nil
+}
+
+// Write compresses data from the supplied buffer into the underlying io.Writer.
+// Write does not return until the data has been written.
+//
+// If the input buffer is large enough (typically in multiples of BlockMaxSize)
+// the data will be compressed concurrently.
+//
+// Write never buffers any data unless in BlockDependency mode, where it may
+// do so until it has 64Kb of data, after which it never buffers any.
+func (z *Writer) Write(buf []byte) (n int, err error) {
+	if !z.Header.done {
+		if err = z.writeHeader(); err != nil {
+			return
+		}
+	}
+
+	if len(buf) == 0 {
+		return
+	}
+
+	if !z.NoChecksum {
+		z.checksum.Write(buf)
+	}
+
+	// with block dependency, require at least 64Kb of data to work with
+	// not having 64Kb only matters initially to set up the first window
+	bl := 0
+	if z.BlockDependency && len(z.window) == 0 {
+		bl = len(z.data)
+		z.data = append(z.data, buf...)
+		if len(z.data) < winSize {
+			return len(buf), nil
+		}
+		buf = z.data
+		z.data = nil
+	}
+
+	// Break up the input buffer into BlockMaxSize blocks, provisioning the leftover block.
+	// Then compress into each of them concurrently if possible (no dependency).
+	var (
+		zb       block
+		wbuf     = buf
+		zn       = len(wbuf) / z.BlockMaxSize
+		zi       = 0
+		leftover = len(buf) % z.BlockMaxSize
+	)
+
+loop:
+	for zi < zn {
+		if z.BlockDependency {
+			if zi == 0 {
+				// first block does not have the window
+				zb.data = append(z.window, wbuf[:z.BlockMaxSize]...)
+				zb.offset = len(z.window)
+				wbuf = wbuf[z.BlockMaxSize-winSize:]
+			} else {
+				// set the uncompressed data including the window from previous block
+				zb.data = wbuf[:z.BlockMaxSize+winSize]
+				zb.offset = winSize
+				wbuf = wbuf[z.BlockMaxSize:]
+			}
+		} else {
+			zb.data = wbuf[:z.BlockMaxSize]
+			wbuf = wbuf[z.BlockMaxSize:]
+		}
+
+		goto write
+	}
+
+	// leftover
+	if leftover > 0 {
+		zb = block{data: wbuf}
+		if z.BlockDependency {
+			if zn == 0 {
+				zb.data = append(z.window, zb.data...)
+				zb.offset = len(z.window)
+			} else {
+				zb.offset = winSize
+			}
+		}
+
+		leftover = 0
+		goto write
+	}
+
+	if z.BlockDependency {
+		if len(z.window) == 0 {
+			z.window = make([]byte, winSize)
+		}
+		// last buffer may be shorter than the window
+		if len(buf) >= winSize {
+			copy(z.window, buf[len(buf)-winSize:])
+		} else {
+			copy(z.window, z.window[len(buf):])
+			copy(z.window[winSize-len(buf):], buf)
+		}
+	}
+
+	return
+
+write:
+	zb = z.compressBlock(zb)
+	_, err = z.writeBlock(zb)
+
+	written := len(zb.data)
+	if bl > 0 {
+		if written >= bl {
+			written -= bl
+			bl = 0
+		} else {
+			bl -= written
+			written = 0
+		}
+	}
+
+	n += written
+	// remove the window in zb.data
+	if z.BlockDependency {
+		if zi == 0 {
+			n -= len(z.window)
+		} else {
+			n -= winSize
+		}
+	}
+	if err != nil {
+		return
+	}
+	zi++
+	goto loop
+}
+
+// compressBlock compresses a block.
+func (z *Writer) compressBlock(zb block) block {
+	// compressed block size cannot exceed the input's
+	var (
+		n    int
+		err  error
+		zbuf = z.zbCompressBuf
+	)
+	if z.HighCompression {
+		n, err = CompressBlockHC(zb.data, zbuf, zb.offset)
+	} else {
+		n, err = CompressBlock(zb.data, zbuf, zb.offset)
+	}
+
+	// compressible and compressed size smaller than decompressed: ok!
+	if err == nil && n > 0 && len(zb.zdata) < len(zb.data) {
+		zb.compressed = true
+		zb.zdata = zbuf[:n]
+	} else {
+		zb.compressed = false
+		zb.zdata = zb.data[zb.offset:]
+	}
+
+	if z.BlockChecksum {
+		xxh := hashPool.Get()
+		xxh.Write(zb.zdata)
+		zb.checksum = xxh.Sum32()
+		hashPool.Put(xxh)
+	}
+
+	return zb
+}
+
+// writeBlock writes a frame block to the underlying io.Writer (size, data).
+func (z *Writer) writeBlock(zb block) (int, error) {
+	bLen := uint32(len(zb.zdata))
+	if !zb.compressed {
+		bLen |= 1 << 31
+	}
+
+	n := 0
+
+	binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen)
+	n, err := z.dst.Write(z.writeSizeBuf)
+	if err != nil {
+		return n, err
+	}
+
+	m, err := z.dst.Write(zb.zdata)
+	n += m
+	if err != nil {
+		return n, err
+	}
+
+	if z.BlockChecksum {
+		binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum)
+		m, err := z.dst.Write(z.writeSizeBuf)
+		n += m
+
+		if err != nil {
+			return n, err
+		}
+	}
+
+	return n, nil
+}
+
+// Flush flushes any pending compressed data to the underlying writer.
+// Flush does not return until the data has been written.
+// If the underlying writer returns an error, Flush returns that error.
+//
+// Flush is only required when in BlockDependency mode and the total amount of
+// data written is less than 64Kb.
+func (z *Writer) Flush() error {
+	if len(z.data) == 0 {
+		return nil
+	}
+
+	zb := z.compressBlock(block{data: z.data})
+	if _, err := z.writeBlock(zb); err != nil {
+		return err
+	}
+	return nil
+}
+
+// Close closes the Writer, flushing any unwritten data to the underlying io.Writer,
+// but does not close the underlying io.Writer.
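// Editor's sketch (not part of the vendored file): Header options must be set
// before the first Write, which validates them and emits the frame descriptor.
//
//	zw := lz4.NewWriter(w)
//	zw.Header.BlockMaxSize = 256 << 10 // one of 64Kb, 256Kb, 1Mb, 4Mb
//	zw.Header.BlockChecksum = true     // checksum each compressed block
//	zw.Header.HighCompression = true   // compressBlock then uses CompressBlockHC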
+func (z *Writer) Close() error { + if !z.Header.done { + if err := z.writeHeader(); err != nil { + return err + } + } + + // buffered data for the block dependency window + if z.BlockDependency && len(z.data) > 0 { + zb := block{data: z.data} + if _, err := z.writeBlock(z.compressBlock(zb)); err != nil { + return err + } + } + + if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil { + return err + } + if !z.NoChecksum { + if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil { + return err + } + } + return nil +} + +// Reset clears the state of the Writer z such that it is equivalent to its +// initial state from NewWriter, but instead writing to w. +// No access to the underlying io.Writer is performed. +func (z *Writer) Reset(w io.Writer) { + z.Header = Header{} + z.dst = w + z.checksum.Reset() + z.data = nil + z.window = nil +} + +// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer. +// Returns the number of bytes read. +// It does not close the Writer. +func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) { + cpus := runtime.GOMAXPROCS(0) + buf := make([]byte, cpus*z.BlockMaxSize) + for { + m, er := io.ReadFull(r, buf) + n += int64(m) + if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF { + if _, err = z.Write(buf[:m]); err != nil { + return + } + if er == nil { + continue + } + return + } + return n, er + } +} diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go new file mode 100644 index 0000000..ff58256 --- /dev/null +++ b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go @@ -0,0 +1,212 @@ +// Package xxHash32 implements the very fast xxHash hashing algorithm (32 bits version). +// (https://github.com/Cyan4973/xxHash/) +package xxHash32 + +import "hash" + +const ( + prime32_1 = 2654435761 + prime32_2 = 2246822519 + prime32_3 = 3266489917 + prime32_4 = 668265263 + prime32_5 = 374761393 +) + +type xxHash struct { + seed uint32 + v1 uint32 + v2 uint32 + v3 uint32 + v4 uint32 + totalLen uint64 + buf [16]byte + bufused int +} + +// New returns a new Hash32 instance. +func New(seed uint32) hash.Hash32 { + xxh := &xxHash{seed: seed} + xxh.Reset() + return xxh +} + +// Sum appends the current hash to b and returns the resulting slice. +// It does not change the underlying hash state. +func (xxh xxHash) Sum(b []byte) []byte { + h32 := xxh.Sum32() + return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24)) +} + +// Reset resets the Hash to its initial state. +func (xxh *xxHash) Reset() { + xxh.v1 = xxh.seed + prime32_1 + prime32_2 + xxh.v2 = xxh.seed + prime32_2 + xxh.v3 = xxh.seed + xxh.v4 = xxh.seed - prime32_1 + xxh.totalLen = 0 + xxh.bufused = 0 +} + +// Size returns the number of bytes returned by Sum(). +func (xxh *xxHash) Size() int { + return 4 +} + +// BlockSize gives the minimum number of bytes accepted by Write(). +func (xxh *xxHash) BlockSize() int { + return 1 +} + +// Write adds input bytes to the Hash. +// It never returns an error. 
+func (xxh *xxHash) Write(input []byte) (int, error) { + n := len(input) + m := xxh.bufused + + xxh.totalLen += uint64(n) + + r := len(xxh.buf) - m + if n < r { + copy(xxh.buf[m:], input) + xxh.bufused += len(input) + return n, nil + } + + p := 0 + if m > 0 { + // some data left from previous update + copy(xxh.buf[xxh.bufused:], input[:r]) + xxh.bufused += len(input) - r + + // fast rotl(13) + xxh.v1 = rol13(xxh.v1+u32(xxh.buf[:])*prime32_2) * prime32_1 + xxh.v2 = rol13(xxh.v2+u32(xxh.buf[4:])*prime32_2) * prime32_1 + xxh.v3 = rol13(xxh.v3+u32(xxh.buf[8:])*prime32_2) * prime32_1 + xxh.v4 = rol13(xxh.v4+u32(xxh.buf[12:])*prime32_2) * prime32_1 + p = r + xxh.bufused = 0 + } + + // Causes compiler to work directly from registers instead of stack: + v1, v2, v3, v4 := xxh.v1, xxh.v2, xxh.v3, xxh.v4 + for n := n - 16; p <= n; p += 16 { + sub := input[p:][:16] //BCE hint for compiler + v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1 + v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1 + v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1 + v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1 + } + xxh.v1, xxh.v2, xxh.v3, xxh.v4 = v1, v2, v3, v4 + + copy(xxh.buf[xxh.bufused:], input[p:]) + xxh.bufused += len(input) - p + + return n, nil +} + +// Sum32 returns the 32 bits Hash value. +func (xxh *xxHash) Sum32() uint32 { + h32 := uint32(xxh.totalLen) + if xxh.totalLen >= 16 { + h32 += rol1(xxh.v1) + rol7(xxh.v2) + rol12(xxh.v3) + rol18(xxh.v4) + } else { + h32 += xxh.seed + prime32_5 + } + + p := 0 + n := xxh.bufused + for n := n - 4; p <= n; p += 4 { + h32 += u32(xxh.buf[p:p+4]) * prime32_3 + h32 = rol17(h32) * prime32_4 + } + for ; p < n; p++ { + h32 += uint32(xxh.buf[p]) * prime32_5 + h32 = rol11(h32) * prime32_1 + } + + h32 ^= h32 >> 15 + h32 *= prime32_2 + h32 ^= h32 >> 13 + h32 *= prime32_3 + h32 ^= h32 >> 16 + + return h32 +} + +// Checksum returns the 32bits Hash value. 
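// Editor's note (not part of the vendored file): the streaming and one-shot
// forms agree on the same input; seed 0 is what the lz4 frame format uses.
//
//	h := xxHash32.New(0)
//	h.Write(data)
//	sum := h.Sum32() // equals xxHash32.Checksum(data, 0)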
+func Checksum(input []byte, seed uint32) uint32 {
+	n := len(input)
+	h32 := uint32(n)
+
+	if n < 16 {
+		h32 += seed + prime32_5
+	} else {
+		v1 := seed + prime32_1 + prime32_2
+		v2 := seed + prime32_2
+		v3 := seed
+		v4 := seed - prime32_1
+		p := 0
+		for n := n - 16; p <= n; p += 16 {
+			sub := input[p:][:16] //BCE hint for compiler
+			v1 = rol13(v1+u32(sub[:])*prime32_2) * prime32_1
+			v2 = rol13(v2+u32(sub[4:])*prime32_2) * prime32_1
+			v3 = rol13(v3+u32(sub[8:])*prime32_2) * prime32_1
+			v4 = rol13(v4+u32(sub[12:])*prime32_2) * prime32_1
+		}
+		input = input[p:]
+		n -= p
+		h32 += rol1(v1) + rol7(v2) + rol12(v3) + rol18(v4)
+	}
+
+	p := 0
+	for n := n - 4; p <= n; p += 4 {
+		h32 += u32(input[p:p+4]) * prime32_3
+		h32 = rol17(h32) * prime32_4
+	}
+	for p < n {
+		h32 += uint32(input[p]) * prime32_5
+		h32 = rol11(h32) * prime32_1
+		p++
+	}
+
+	h32 ^= h32 >> 15
+	h32 *= prime32_2
+	h32 ^= h32 >> 13
+	h32 *= prime32_3
+	h32 ^= h32 >> 16
+
+	return h32
+}
+
+func u32(buf []byte) uint32 {
+	// go compiler recognizes this pattern and optimizes it on little endian platforms
+	return uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24
+}
+
+func rol1(u uint32) uint32 {
+	return u<<1 | u>>31
+}
+
+func rol7(u uint32) uint32 {
+	return u<<7 | u>>25
+}
+
+func rol11(u uint32) uint32 {
+	return u<<11 | u>>21
+}
+
+func rol12(u uint32) uint32 {
+	return u<<12 | u>>20
+}
+
+func rol13(u uint32) uint32 {
+	return u<<13 | u>>19
+}
+
+func rol17(u uint32) uint32 {
+	return u<<17 | u>>15
+}
+
+func rol18(u uint32) uint32 {
+	return u<<18 | u>>14
+}
diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go
new file mode 100644
index 0000000..842ee80
--- /dev/null
+++ b/vendor/github.com/pkg/errors/errors.go
@@ -0,0 +1,269 @@
+// Package errors provides simple error handling primitives.
+//
+// The traditional error handling idiom in Go is roughly akin to
+//
+//     if err != nil {
+//             return err
+//     }
+//
+// which applied recursively up the call stack results in error reports
+// without context or debugging information. The errors package allows
+// programmers to add context to the failure path in their code in a way
+// that does not destroy the original value of the error.
+//
+// Adding context to an error
+//
+// The errors.Wrap function returns a new error that adds context to the
+// original error by recording a stack trace at the point Wrap is called,
+// and the supplied message. For example
+//
+//     _, err := ioutil.ReadAll(r)
+//     if err != nil {
+//             return errors.Wrap(err, "read failed")
+//     }
+//
+// If additional control is required the errors.WithStack and errors.WithMessage
+// functions destructure errors.Wrap into its component operations of annotating
+// an error with a stack trace and a message, respectively.
+//
+// Retrieving the cause of an error
+//
+// Using errors.Wrap constructs a stack of errors, adding context to the
+// preceding error. Depending on the nature of the error it may be necessary
+// to reverse the operation of errors.Wrap to retrieve the original error
+// for inspection. Any error value which implements this interface
+//
+//     type causer interface {
+//             Cause() error
+//     }
+//
+// can be inspected by errors.Cause. errors.Cause will recursively retrieve
+// the topmost error which does not implement causer, which is assumed to be
+// the original cause. For example:
+//
+//     switch err := errors.Cause(err).(type) {
+//     case *MyError:
+//             // handle specifically
+//     default:
+//             // unknown error
+//     }
+//
+// The causer interface is not exported by this package, but is considered a
+// part of the stable public API.
+//
+// Formatted printing of errors
+//
+// All error values returned from this package implement fmt.Formatter and can
+// be formatted by the fmt package. The following verbs are supported
+//
+//     %s    print the error. If the error has a Cause it will be
+//           printed recursively
+//     %v    see %s
+//     %+v   extended format. Each Frame of the error's StackTrace will
+//           be printed in detail.
+//
+// Retrieving the stack trace of an error or wrapper
+//
+// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
+// invoked. This information can be retrieved with the following interface.
+//
+//     type stackTracer interface {
+//             StackTrace() errors.StackTrace
+//     }
+//
+// Where errors.StackTrace is defined as
+//
+//     type StackTrace []Frame
+//
+// The Frame type represents a call site in the stack trace. Frame supports
+// the fmt.Formatter interface that can be used for printing information about
+// the stack trace of this error. For example:
+//
+//     if err, ok := err.(stackTracer); ok {
+//             for _, f := range err.StackTrace() {
+//                     fmt.Printf("%+s:%d", f)
+//             }
+//     }
+//
+// The stackTracer interface is not exported by this package, but is considered
+// a part of the stable public API.
+//
+// See the documentation for Frame.Format for more details.
+package errors
+
+import (
+	"fmt"
+	"io"
+)
+
+// New returns an error with the supplied message.
+// New also records the stack trace at the point it was called.
+func New(message string) error {
+	return &fundamental{
+		msg:   message,
+		stack: callers(),
+	}
+}
+
+// Errorf formats according to a format specifier and returns the string
+// as a value that satisfies error.
+// Errorf also records the stack trace at the point it was called.
+func Errorf(format string, args ...interface{}) error {
+	return &fundamental{
+		msg:   fmt.Sprintf(format, args...),
+		stack: callers(),
+	}
+}
+
+// fundamental is an error that has a message and a stack, but no caller.
+type fundamental struct {
+	msg string
+	*stack
+}
+
+func (f *fundamental) Error() string { return f.msg }
+
+func (f *fundamental) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			io.WriteString(s, f.msg)
+			f.stack.Format(s, verb)
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, f.msg)
+	case 'q':
+		fmt.Fprintf(s, "%q", f.msg)
+	}
+}
+
+// WithStack annotates err with a stack trace at the point WithStack was called.
+// If err is nil, WithStack returns nil.
+func WithStack(err error) error {
+	if err == nil {
+		return nil
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+type withStack struct {
+	error
+	*stack
+}
+
+func (w *withStack) Cause() error { return w.error }
+
+func (w *withStack) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v", w.Cause())
+			w.stack.Format(s, verb)
+			return
+		}
+		fallthrough
+	case 's':
+		io.WriteString(s, w.Error())
+	case 'q':
+		fmt.Fprintf(s, "%q", w.Error())
+	}
+}
+
+// Wrap returns an error annotating err with a stack trace
+// at the point Wrap is called, and the supplied message.
+// If err is nil, Wrap returns nil.
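// Editor's sketch (not part of the vendored file): wrapping an error and later
// recovering the original value, as described in the package documentation.
//
//	err := errors.Wrap(io.EOF, "read failed")
//	fmt.Println(err)                // "read failed: EOF"
//	_ = errors.Cause(err) == io.EOF // true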
+func Wrap(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   message,
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+// Wrapf returns an error annotating err with a stack trace
+// at the point Wrapf is called, and the format specifier.
+// If err is nil, Wrapf returns nil.
+func Wrapf(err error, format string, args ...interface{}) error {
+	if err == nil {
+		return nil
+	}
+	err = &withMessage{
+		cause: err,
+		msg:   fmt.Sprintf(format, args...),
+	}
+	return &withStack{
+		err,
+		callers(),
+	}
+}
+
+// WithMessage annotates err with a new message.
+// If err is nil, WithMessage returns nil.
+func WithMessage(err error, message string) error {
+	if err == nil {
+		return nil
+	}
+	return &withMessage{
+		cause: err,
+		msg:   message,
+	}
+}
+
+type withMessage struct {
+	cause error
+	msg   string
+}
+
+func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() }
+func (w *withMessage) Cause() error  { return w.cause }
+
+func (w *withMessage) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		if s.Flag('+') {
+			fmt.Fprintf(s, "%+v\n", w.Cause())
+			io.WriteString(s, w.msg)
+			return
+		}
+		fallthrough
+	case 's', 'q':
+		io.WriteString(s, w.Error())
+	}
+}
+
+// Cause returns the underlying cause of the error, if possible.
+// An error value has a cause if it implements the following
+// interface:
+//
+//     type causer interface {
+//             Cause() error
+//     }
++//
+// If the error does not implement Cause, the original error will
+// be returned. If the error is nil, nil will be returned without further
+// investigation.
+func Cause(err error) error {
+	type causer interface {
+		Cause() error
+	}
+
+	for err != nil {
+		cause, ok := err.(causer)
+		if !ok {
+			break
+		}
+		err = cause.Cause()
+	}
+	return err
+}
diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go
new file mode 100644
index 0000000..cbe3f3e
--- /dev/null
+++ b/vendor/github.com/pkg/errors/stack.go
@@ -0,0 +1,186 @@
+package errors
+
+import (
+	"fmt"
+	"io"
+	"path"
+	"runtime"
+	"strings"
+)
+
+// Frame represents a program counter inside a stack frame.
+type Frame uintptr
+
+// pc returns the program counter for this frame;
+// multiple frames may have the same PC value.
+func (f Frame) pc() uintptr { return uintptr(f) - 1 }
+
+// file returns the full path to the file that contains the
+// function for this Frame's pc.
+func (f Frame) file() string {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return "unknown"
+	}
+	file, _ := fn.FileLine(f.pc())
+	return file
+}
+
+// line returns the line number of source code of the
+// function for this Frame's pc.
+func (f Frame) line() int {
+	fn := runtime.FuncForPC(f.pc())
+	if fn == nil {
+		return 0
+	}
+	_, line := fn.FileLine(f.pc())
+	return line
+}
+
+// Format formats the frame according to the fmt.Formatter interface.
+//
+//     %s    source file
+//     %d    source line
+//     %n    function name
+//     %v    equivalent to %s:%d
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//     %+s   path of source file relative to the compile time GOPATH
+//     %+v   equivalent to %+s:%d
+func (f Frame) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 's':
+		switch {
+		case s.Flag('+'):
+			pc := f.pc()
+			fn := runtime.FuncForPC(pc)
+			if fn == nil {
+				io.WriteString(s, "unknown")
+			} else {
+				file, _ := fn.FileLine(pc)
+				fmt.Fprintf(s, "%s\n\t%s", fn.Name(), file)
+			}
+		default:
+			io.WriteString(s, path.Base(f.file()))
+		}
+	case 'd':
+		fmt.Fprintf(s, "%d", f.line())
+	case 'n':
+		name := runtime.FuncForPC(f.pc()).Name()
+		io.WriteString(s, funcname(name))
+	case 'v':
+		f.Format(s, 's')
+		io.WriteString(s, ":")
+		f.Format(s, 'd')
+	}
+}
+
+// StackTrace is a stack of Frames from innermost (newest) to outermost (oldest).
+type StackTrace []Frame
+
+// Format formats the stack of Frames according to the fmt.Formatter interface.
+//
+//     %s    lists source files for each Frame in the stack
+//     %v    lists the source file and line number for each Frame in the stack
+//
+// Format accepts flags that alter the printing of some verbs, as follows:
+//
+//     %+v   Prints filename, function, and line number for each Frame in the stack.
+func (st StackTrace) Format(s fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case s.Flag('+'):
+			for _, f := range st {
+				fmt.Fprintf(s, "\n%+v", f)
+			}
+		case s.Flag('#'):
+			fmt.Fprintf(s, "%#v", []Frame(st))
+		default:
+			fmt.Fprintf(s, "%v", []Frame(st))
+		}
+	case 's':
+		fmt.Fprintf(s, "%s", []Frame(st))
+	}
+}
+
+// stack represents a stack of program counters.
+type stack []uintptr
+
+func (s *stack) Format(st fmt.State, verb rune) {
+	switch verb {
+	case 'v':
+		switch {
+		case st.Flag('+'):
+			for _, pc := range *s {
+				f := Frame(pc)
+				fmt.Fprintf(st, "\n%+v", f)
+			}
+		}
+	}
+}
+
+func (s *stack) StackTrace() StackTrace {
+	f := make([]Frame, len(*s))
+	for i := 0; i < len(f); i++ {
+		f[i] = Frame((*s)[i])
+	}
+	return f
+}
+
+func callers() *stack {
+	const depth = 32
+	var pcs [depth]uintptr
+	n := runtime.Callers(3, pcs[:])
+	var st stack = pcs[0:n]
+	return &st
+}
+
+// funcname removes the path prefix component of a function's name reported by func.Name().
+func funcname(name string) string {
+	i := strings.LastIndex(name, "/")
+	name = name[i+1:]
+	i = strings.Index(name, ".")
+	return name[i+1:]
+}
+
+func trimGOPATH(name, file string) string {
+	// Here we want to get the source file path relative to the compile time
+	// GOPATH. As of Go 1.6.x there is no direct way to know the compiled
+	// GOPATH at runtime, but we can infer the number of path segments in the
+	// GOPATH. We note that fn.Name() returns the function name qualified by
+	// the import path, which does not include the GOPATH. Thus we can trim
+	// segments from the beginning of the file path until the number of path
+	// separators remaining is one more than the number of path separators in
+	// the function name. For example, given:
+	//
+	//    GOPATH /home/user
+	//    file   /home/user/src/pkg/sub/file.go
+	//    fn.Name() pkg/sub.Type.Method
+	//
+	// We want to produce:
+	//
+	//    pkg/sub/file.go
+	//
+	// From this we can easily see that fn.Name() has one less path separator
+	// than our desired output.
+	// We count separators from the end of the file path until we find two
+	// more than in the function name, then move one character forward to
+	// preserve the initial path segment without a leading separator.
+	const sep = "/"
+	goal := strings.Count(name, sep) + 2
+	i := len(file)
+	for n := 0; n < goal; n++ {
+		i = strings.LastIndex(file[:i], sep)
+		if i == -1 {
+			// not enough separators found, set i so that the slice expression
+			// below leaves file unmodified
+			i = -len(sep)
+			break
+		}
+	}
+	// get back to 0 or trim the leading separator
+	file = file[i+len(sep):]
+	return file
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
new file mode 100644
index 0000000..bb7b039
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/counter.go
@@ -0,0 +1,112 @@
+package metrics
+
+import "sync/atomic"
+
+// Counters hold an int64 value that can be incremented and decremented.
+type Counter interface {
+	Clear()
+	Count() int64
+	Dec(int64)
+	Inc(int64)
+	Snapshot() Counter
+}
+
+// GetOrRegisterCounter returns an existing Counter or constructs and registers
+// a new StandardCounter.
+func GetOrRegisterCounter(name string, r Registry) Counter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewCounter).(Counter)
+}
+
+// NewCounter constructs a new StandardCounter.
+func NewCounter() Counter {
+	if UseNilMetrics {
+		return NilCounter{}
+	}
+	return &StandardCounter{0}
+}
+
+// NewRegisteredCounter constructs and registers a new StandardCounter.
+func NewRegisteredCounter(name string, r Registry) Counter {
+	c := NewCounter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// CounterSnapshot is a read-only copy of another Counter.
+type CounterSnapshot int64
+
+// Clear panics.
+func (CounterSnapshot) Clear() {
+	panic("Clear called on a CounterSnapshot")
+}
+
+// Count returns the count at the time the snapshot was taken.
+func (c CounterSnapshot) Count() int64 { return int64(c) }
+
+// Dec panics.
+func (CounterSnapshot) Dec(int64) {
+	panic("Dec called on a CounterSnapshot")
+}
+
+// Inc panics.
+func (CounterSnapshot) Inc(int64) {
+	panic("Inc called on a CounterSnapshot")
+}
+
+// Snapshot returns the snapshot.
+func (c CounterSnapshot) Snapshot() Counter { return c }
+
+// NilCounter is a no-op Counter.
+type NilCounter struct{}
+
+// Clear is a no-op.
+func (NilCounter) Clear() {}
+
+// Count is a no-op.
+func (NilCounter) Count() int64 { return 0 }
+
+// Dec is a no-op.
+func (NilCounter) Dec(i int64) {}
+
+// Inc is a no-op.
+func (NilCounter) Inc(i int64) {}
+
+// Snapshot is a no-op.
+func (NilCounter) Snapshot() Counter { return NilCounter{} }
+
+// StandardCounter is the standard implementation of a Counter and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardCounter struct {
+	count int64
+}
+
+// Clear sets the counter to zero.
+func (c *StandardCounter) Clear() {
+	atomic.StoreInt64(&c.count, 0)
+}
+
+// Count returns the current count.
+func (c *StandardCounter) Count() int64 {
+	return atomic.LoadInt64(&c.count)
+}
+
+// Dec decrements the counter by the given amount.
+func (c *StandardCounter) Dec(i int64) {
+	atomic.AddInt64(&c.count, -i)
+}
+
+// Inc increments the counter by the given amount.
+func (c *StandardCounter) Inc(i int64) {
+	atomic.AddInt64(&c.count, i)
+}
+
+// Snapshot returns a read-only copy of the counter.
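// Editor's sketch (not part of the vendored file): typical Counter usage;
// passing a nil Registry selects DefaultRegistry.
//
//	c := metrics.GetOrRegisterCounter("requests", nil)
//	c.Inc(1)
//	snap := c.Snapshot() // read-only view: snap.Inc or snap.Clear would panic
//	total := snap.Count()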
+func (c *StandardCounter) Snapshot() Counter { + return CounterSnapshot(c.Count()) +} diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go new file mode 100644 index 0000000..043ccef --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/debug.go @@ -0,0 +1,76 @@ +package metrics + +import ( + "runtime/debug" + "time" +) + +var ( + debugMetrics struct { + GCStats struct { + LastGC Gauge + NumGC Gauge + Pause Histogram + //PauseQuantiles Histogram + PauseTotal Gauge + } + ReadGCStats Timer + } + gcStats debug.GCStats +) + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called as a goroutine. +func CaptureDebugGCStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureDebugGCStatsOnce(r) + } +} + +// Capture new values for the Go garbage collector statistics exported in +// debug.GCStats. This is designed to be called in a background goroutine. +// Giving a registry which has not been given to RegisterDebugGCStats will +// panic. +// +// Be careful (but much less so) with this because debug.ReadGCStats calls +// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world +// operation, isn't something you want to be doing all the time. +func CaptureDebugGCStatsOnce(r Registry) { + lastGC := gcStats.LastGC + t := time.Now() + debug.ReadGCStats(&gcStats) + debugMetrics.ReadGCStats.UpdateSince(t) + + debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano())) + debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC)) + if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) { + debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0])) + } + //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles) + debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal)) +} + +// Register metrics for the Go garbage collector statistics exported in +// debug.GCStats. The metrics are named by their fully-qualified Go symbols, +// i.e. debug.GCStats.PauseTotal. +func RegisterDebugGCStats(r Registry) { + debugMetrics.GCStats.LastGC = NewGauge() + debugMetrics.GCStats.NumGC = NewGauge() + debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015)) + //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015)) + debugMetrics.GCStats.PauseTotal = NewGauge() + debugMetrics.ReadGCStats = NewTimer() + + r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC) + r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC) + r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause) + //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles) + r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal) + r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats) +} + +// Allocate an initial slice for gcStats.Pause to avoid allocations during +// normal operation. +func init() { + gcStats.Pause = make([]time.Duration, 11) +} diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go new file mode 100644 index 0000000..694a1d0 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/ewma.go @@ -0,0 +1,118 @@ +package metrics + +import ( + "math" + "sync" + "sync/atomic" +) + +// EWMAs continuously calculate an exponentially-weighted moving average +// based on an outside source of clock ticks. 
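// Editor's sketch (not part of the vendored file): an EWMA does not tick
// itself; StandardEWMA assumes the caller drives Tick every five seconds.
//
//	a := metrics.NewEWMA1() // one-minute moving average
//	a.Update(120)           // record 120 events
//	a.Tick()                // fold them in: Rate() now reports 24 events/sec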
+type EWMA interface { + Rate() float64 + Snapshot() EWMA + Tick() + Update(int64) +} + +// NewEWMA constructs a new EWMA with the given alpha. +func NewEWMA(alpha float64) EWMA { + if UseNilMetrics { + return NilEWMA{} + } + return &StandardEWMA{alpha: alpha} +} + +// NewEWMA1 constructs a new EWMA for a one-minute moving average. +func NewEWMA1() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/1)) +} + +// NewEWMA5 constructs a new EWMA for a five-minute moving average. +func NewEWMA5() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/5)) +} + +// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average. +func NewEWMA15() EWMA { + return NewEWMA(1 - math.Exp(-5.0/60.0/15)) +} + +// EWMASnapshot is a read-only copy of another EWMA. +type EWMASnapshot float64 + +// Rate returns the rate of events per second at the time the snapshot was +// taken. +func (a EWMASnapshot) Rate() float64 { return float64(a) } + +// Snapshot returns the snapshot. +func (a EWMASnapshot) Snapshot() EWMA { return a } + +// Tick panics. +func (EWMASnapshot) Tick() { + panic("Tick called on an EWMASnapshot") +} + +// Update panics. +func (EWMASnapshot) Update(int64) { + panic("Update called on an EWMASnapshot") +} + +// NilEWMA is a no-op EWMA. +type NilEWMA struct{} + +// Rate is a no-op. +func (NilEWMA) Rate() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilEWMA) Snapshot() EWMA { return NilEWMA{} } + +// Tick is a no-op. +func (NilEWMA) Tick() {} + +// Update is a no-op. +func (NilEWMA) Update(n int64) {} + +// StandardEWMA is the standard implementation of an EWMA and tracks the number +// of uncounted events and processes them on each tick. It uses the +// sync/atomic package to manage uncounted events. +type StandardEWMA struct { + uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment + alpha float64 + rate float64 + init bool + mutex sync.Mutex +} + +// Rate returns the moving average rate of events per second. +func (a *StandardEWMA) Rate() float64 { + a.mutex.Lock() + defer a.mutex.Unlock() + return a.rate * float64(1e9) +} + +// Snapshot returns a read-only copy of the EWMA. +func (a *StandardEWMA) Snapshot() EWMA { + return EWMASnapshot(a.Rate()) +} + +// Tick ticks the clock to update the moving average. It assumes it is called +// every five seconds. +func (a *StandardEWMA) Tick() { + count := atomic.LoadInt64(&a.uncounted) + atomic.AddInt64(&a.uncounted, -count) + instantRate := float64(count) / float64(5e9) + a.mutex.Lock() + defer a.mutex.Unlock() + if a.init { + a.rate += a.alpha * (instantRate - a.rate) + } else { + a.init = true + a.rate = instantRate + } +} + +// Update adds n uncounted events. +func (a *StandardEWMA) Update(n int64) { + atomic.AddInt64(&a.uncounted, n) +} diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go new file mode 100644 index 0000000..cb57a93 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/gauge.go @@ -0,0 +1,120 @@ +package metrics + +import "sync/atomic" + +// Gauges hold an int64 value that can be set arbitrarily. +type Gauge interface { + Snapshot() Gauge + Update(int64) + Value() int64 +} + +// GetOrRegisterGauge returns an existing Gauge or constructs and registers a +// new StandardGauge. +func GetOrRegisterGauge(name string, r Registry) Gauge { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewGauge).(Gauge) +} + +// NewGauge constructs a new StandardGauge. 
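// Editor's sketch (not part of the vendored file): gauges are set rather than
// incremented, and a FunctionalGauge defers to a callback at read time.
//
//	g := metrics.GetOrRegisterGauge("queue.depth", nil)
//	g.Update(42)
//	fg := metrics.NewFunctionalGauge(func() int64 { return int64(runtime.NumGoroutine()) })
//	_ = fg.Value() // evaluated on demand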
+func NewGauge() Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &StandardGauge{0}
+}
+
+// NewRegisteredGauge constructs and registers a new StandardGauge.
+func NewRegisteredGauge(name string, r Registry) Gauge {
+	c := NewGauge()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGauge constructs a new FunctionalGauge.
+func NewFunctionalGauge(f func() int64) Gauge {
+	if UseNilMetrics {
+		return NilGauge{}
+	}
+	return &FunctionalGauge{value: f}
+}
+
+// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
+func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
+	c := NewFunctionalGauge(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeSnapshot is a read-only copy of another Gauge.
+type GaugeSnapshot int64
+
+// Snapshot returns the snapshot.
+func (g GaugeSnapshot) Snapshot() Gauge { return g }
+
+// Update panics.
+func (GaugeSnapshot) Update(int64) {
+	panic("Update called on a GaugeSnapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeSnapshot) Value() int64 { return int64(g) }
+
+// NilGauge is a no-op Gauge.
+type NilGauge struct{}
+
+// Snapshot is a no-op.
+func (NilGauge) Snapshot() Gauge { return NilGauge{} }
+
+// Update is a no-op.
+func (NilGauge) Update(v int64) {}
+
+// Value is a no-op.
+func (NilGauge) Value() int64 { return 0 }
+
+// StandardGauge is the standard implementation of a Gauge and uses the
+// sync/atomic package to manage a single int64 value.
+type StandardGauge struct {
+	value int64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGauge) Snapshot() Gauge {
+	return GaugeSnapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGauge) Update(v int64) {
+	atomic.StoreInt64(&g.value, v)
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGauge) Value() int64 {
+	return atomic.LoadInt64(&g.value)
+}
+
+// FunctionalGauge returns the value from a given function.
+type FunctionalGauge struct {
+	value func() int64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGauge) Value() int64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
+
+// Update panics.
+func (FunctionalGauge) Update(int64) {
+	panic("Update called on a FunctionalGauge")
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
new file mode 100644
index 0000000..6f93920
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
@@ -0,0 +1,127 @@
+package metrics
+
+import "sync"
+
+// GaugeFloat64s hold a float64 value that can be set arbitrarily.
+type GaugeFloat64 interface {
+	Snapshot() GaugeFloat64
+	Update(float64)
+	Value() float64
+}
+
+// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
+// new StandardGaugeFloat64.
+func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
+}
+
+// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
+func NewGaugeFloat64() GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &StandardGaugeFloat64{
+		value: 0.0,
+	}
+}
+
+// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
+func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
+	c := NewGaugeFloat64()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
+func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
+	if UseNilMetrics {
+		return NilGaugeFloat64{}
+	}
+	return &FunctionalGaugeFloat64{value: f}
+}
+
+// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new
+// FunctionalGaugeFloat64.
+func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
+	c := NewFunctionalGaugeFloat64(f)
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
+type GaugeFloat64Snapshot float64
+
+// Snapshot returns the snapshot.
+func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
+
+// Update panics.
+func (GaugeFloat64Snapshot) Update(float64) {
+	panic("Update called on a GaugeFloat64Snapshot")
+}
+
+// Value returns the value at the time the snapshot was taken.
+func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
+
+// NilGaugeFloat64 is a no-op GaugeFloat64.
+type NilGaugeFloat64 struct{}
+
+// Snapshot is a no-op.
+func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
+
+// Update is a no-op.
+func (NilGaugeFloat64) Update(v float64) {}
+
+// Value is a no-op.
+func (NilGaugeFloat64) Value() float64 { return 0.0 }
+
+// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
+// sync.Mutex to manage a single float64 value.
+type StandardGaugeFloat64 struct {
+	mutex sync.Mutex
+	value float64
+}
+
+// Snapshot returns a read-only copy of the gauge.
+func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
+	return GaugeFloat64Snapshot(g.Value())
+}
+
+// Update updates the gauge's value.
+func (g *StandardGaugeFloat64) Update(v float64) {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+	g.value = v
+}
+
+// Value returns the gauge's current value.
+func (g *StandardGaugeFloat64) Value() float64 {
+	g.mutex.Lock()
+	defer g.mutex.Unlock()
+	return g.value
+}
+
+// FunctionalGaugeFloat64 holds a function that is called to obtain the gauge's value.
+type FunctionalGaugeFloat64 struct {
+	value func() float64
+}
+
+// Value returns the gauge's current value.
+func (g FunctionalGaugeFloat64) Value() float64 {
+	return g.value()
+}
+
+// Snapshot returns the snapshot.
+func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
+
+// Update panics.
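+//
+// Editor's note: an illustrative sketch, not upstream code. A functional
+// gauge is a read-only view over the supplied function, so callers should
+// only read it; Update exists solely to satisfy the interface:
+//
+//	g := metrics.NewFunctionalGaugeFloat64(func() float64 { return 0.5 })
+//	_ = g.Value() // invokes the function: 0.5
+//	// g.Update(1.0) would panic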
+func (FunctionalGaugeFloat64) Update(float64) { + panic("Update called on a FunctionalGaugeFloat64") +} diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go new file mode 100644 index 0000000..abd0a7d --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/graphite.go @@ -0,0 +1,113 @@ +package metrics + +import ( + "bufio" + "fmt" + "log" + "net" + "strconv" + "strings" + "time" +) + +// GraphiteConfig provides a container with configuration parameters for +// the Graphite exporter +type GraphiteConfig struct { + Addr *net.TCPAddr // Network address to connect to + Registry Registry // Registry to be exported + FlushInterval time.Duration // Flush interval + DurationUnit time.Duration // Time conversion unit for durations + Prefix string // Prefix to be prepended to metric names + Percentiles []float64 // Percentiles to export from timers and histograms +} + +// Graphite is a blocking exporter function which reports metrics in r +// to a graphite server located at addr, flushing them every d duration +// and prepending metric names with prefix. +func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) { + GraphiteWithConfig(GraphiteConfig{ + Addr: addr, + Registry: r, + FlushInterval: d, + DurationUnit: time.Nanosecond, + Prefix: prefix, + Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999}, + }) +} + +// GraphiteWithConfig is a blocking exporter function just like Graphite, +// but it takes a GraphiteConfig instead. +func GraphiteWithConfig(c GraphiteConfig) { + log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + for _ = range time.Tick(c.FlushInterval) { + if err := graphite(&c); nil != err { + log.Println(err) + } + } +} + +// GraphiteOnce performs a single submission to Graphite, returning a +// non-nil error on failed connections. This can be used in a loop +// similar to GraphiteWithConfig for custom error handling. +func GraphiteOnce(c GraphiteConfig) error { + log.Printf("WARNING: This go-metrics client has been DEPRECATED! 
It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015") + return graphite(&c) +} + +func graphite(c *GraphiteConfig) error { + now := time.Now().Unix() + du := float64(c.DurationUnit) + conn, err := net.DialTCP("tcp", nil, c.Addr) + if nil != err { + return err + } + defer conn.Close() + w := bufio.NewWriter(conn) + c.Registry.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now) + case Gauge: + fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now) + case GaugeFloat64: + fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now) + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles(c.Percentiles) + fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now) + fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now) + fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now) + fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now) + fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now) + for psIdx, psKey := range c.Percentiles { + key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1) + fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now) + } + fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now) + fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now) + fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now) + fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go new file mode 100644 index 0000000..445131c --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/healthcheck.go @@ -0,0 +1,61 @@ +package metrics + +// Healthchecks hold an error value describing an arbitrary up/down status. +type Healthcheck interface { + Check() + Error() error + Healthy() + Unhealthy(error) +} + +// NewHealthcheck constructs a new Healthcheck which will use the given +// function to update its status. 
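+//
+// Editor's note: an illustrative sketch, not upstream code; pingDatabase
+// is a hypothetical probe:
+//
+//	hc := metrics.NewHealthcheck(func(h metrics.Healthcheck) {
+//		if err := pingDatabase(); err != nil {
+//			h.Unhealthy(err)
+//		} else {
+//			h.Healthy()
+//		}
+//	})
+//	hc.Check()
+//	_ = hc.Error() // nil while healthy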
+func NewHealthcheck(f func(Healthcheck)) Healthcheck { + if UseNilMetrics { + return NilHealthcheck{} + } + return &StandardHealthcheck{nil, f} +} + +// NilHealthcheck is a no-op. +type NilHealthcheck struct{} + +// Check is a no-op. +func (NilHealthcheck) Check() {} + +// Error is a no-op. +func (NilHealthcheck) Error() error { return nil } + +// Healthy is a no-op. +func (NilHealthcheck) Healthy() {} + +// Unhealthy is a no-op. +func (NilHealthcheck) Unhealthy(error) {} + +// StandardHealthcheck is the standard implementation of a Healthcheck and +// stores the status and a function to call to update the status. +type StandardHealthcheck struct { + err error + f func(Healthcheck) +} + +// Check runs the healthcheck function to update the healthcheck's status. +func (h *StandardHealthcheck) Check() { + h.f(h) +} + +// Error returns the healthcheck's status, which will be nil if it is healthy. +func (h *StandardHealthcheck) Error() error { + return h.err +} + +// Healthy marks the healthcheck as healthy. +func (h *StandardHealthcheck) Healthy() { + h.err = nil +} + +// Unhealthy marks the healthcheck as unhealthy. The error is stored and +// may be retrieved by the Error method. +func (h *StandardHealthcheck) Unhealthy(err error) { + h.err = err +} diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go new file mode 100644 index 0000000..dbc837f --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/histogram.go @@ -0,0 +1,202 @@ +package metrics + +// Histograms calculate distribution statistics from a series of int64 values. +type Histogram interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Sample() Sample + Snapshot() Histogram + StdDev() float64 + Sum() int64 + Update(int64) + Variance() float64 +} + +// GetOrRegisterHistogram returns an existing Histogram or constructs and +// registers a new StandardHistogram. +func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram) +} + +// NewHistogram constructs a new StandardHistogram from a Sample. +func NewHistogram(s Sample) Histogram { + if UseNilMetrics { + return NilHistogram{} + } + return &StandardHistogram{sample: s} +} + +// NewRegisteredHistogram constructs and registers a new StandardHistogram from +// a Sample. +func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram { + c := NewHistogram(s) + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// HistogramSnapshot is a read-only copy of another Histogram. +type HistogramSnapshot struct { + sample *SampleSnapshot +} + +// Clear panics. +func (*HistogramSnapshot) Clear() { + panic("Clear called on a HistogramSnapshot") +} + +// Count returns the number of samples recorded at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample at the time the snapshot was +// taken. +func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample at the time the snapshot +// was taken. +func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample at the time the snapshot was +// taken. 
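+//
+// Editor's note: an illustrative sketch, not upstream code: take one
+// snapshot and read several statistics from it so they describe a single
+// consistent view of the sample:
+//
+//	h := metrics.NewHistogram(metrics.NewUniformSample(1028))
+//	h.Update(120)
+//	s := h.Snapshot()
+//	_, _ = s.Min(), s.Max()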
+func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the sample +// at the time the snapshot was taken. +func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *HistogramSnapshot) Sample() Sample { return h.sample } + +// Snapshot returns the snapshot. +func (h *HistogramSnapshot) Snapshot() Histogram { return h } + +// StdDev returns the standard deviation of the values in the sample at the +// time the snapshot was taken. +func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample at the time the snapshot was taken. +func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() } + +// Update panics. +func (*HistogramSnapshot) Update(int64) { + panic("Update called on a HistogramSnapshot") +} + +// Variance returns the variance of inputs at the time the snapshot was taken. +func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() } + +// NilHistogram is a no-op Histogram. +type NilHistogram struct{} + +// Clear is a no-op. +func (NilHistogram) Clear() {} + +// Count is a no-op. +func (NilHistogram) Count() int64 { return 0 } + +// Max is a no-op. +func (NilHistogram) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilHistogram) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilHistogram) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilHistogram) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilHistogram) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Sample is a no-op. +func (NilHistogram) Sample() Sample { return NilSample{} } + +// Snapshot is a no-op. +func (NilHistogram) Snapshot() Histogram { return NilHistogram{} } + +// StdDev is a no-op. +func (NilHistogram) StdDev() float64 { return 0.0 } + +// Sum is a no-op. +func (NilHistogram) Sum() int64 { return 0 } + +// Update is a no-op. +func (NilHistogram) Update(v int64) {} + +// Variance is a no-op. +func (NilHistogram) Variance() float64 { return 0.0 } + +// StandardHistogram is the standard implementation of a Histogram and uses a +// Sample to bound its memory use. +type StandardHistogram struct { + sample Sample +} + +// Clear clears the histogram and its sample. +func (h *StandardHistogram) Clear() { h.sample.Clear() } + +// Count returns the number of samples recorded since the histogram was last +// cleared. +func (h *StandardHistogram) Count() int64 { return h.sample.Count() } + +// Max returns the maximum value in the sample. +func (h *StandardHistogram) Max() int64 { return h.sample.Max() } + +// Mean returns the mean of the values in the sample. +func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() } + +// Min returns the minimum value in the sample. +func (h *StandardHistogram) Min() int64 { return h.sample.Min() } + +// Percentile returns an arbitrary percentile of the values in the sample. +func (h *StandardHistogram) Percentile(p float64) float64 { + return h.sample.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. 
+func (h *StandardHistogram) Percentiles(ps []float64) []float64 { + return h.sample.Percentiles(ps) +} + +// Sample returns the Sample underlying the histogram. +func (h *StandardHistogram) Sample() Sample { return h.sample } + +// Snapshot returns a read-only copy of the histogram. +func (h *StandardHistogram) Snapshot() Histogram { + return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)} +} + +// StdDev returns the standard deviation of the values in the sample. +func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() } + +// Sum returns the sum in the sample. +func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() } + +// Update samples a new value. +func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) } + +// Variance returns the variance of the values in the sample. +func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() } diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go new file mode 100644 index 0000000..2fdcbcf --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/json.go @@ -0,0 +1,87 @@ +package metrics + +import ( + "encoding/json" + "io" + "time" +) + +// MarshalJSON returns a byte slice containing a JSON representation of all +// the metrics in the Registry. +func (r *StandardRegistry) MarshalJSON() ([]byte, error) { + data := make(map[string]map[string]interface{}) + r.Each(func(name string, i interface{}) { + values := make(map[string]interface{}) + switch metric := i.(type) { + case Counter: + values["count"] = metric.Count() + case Gauge: + values["value"] = metric.Value() + case GaugeFloat64: + values["value"] = metric.Value() + case Healthcheck: + values["error"] = nil + metric.Check() + if err := metric.Error(); nil != err { + values["error"] = metric.Error().Error() + } + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = h.Count() + values["min"] = h.Min() + values["max"] = h.Max() + values["mean"] = h.Mean() + values["stddev"] = h.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + case Meter: + m := metric.Snapshot() + values["count"] = m.Count() + values["1m.rate"] = m.Rate1() + values["5m.rate"] = m.Rate5() + values["15m.rate"] = m.Rate15() + values["mean.rate"] = m.RateMean() + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + values["count"] = t.Count() + values["min"] = t.Min() + values["max"] = t.Max() + values["mean"] = t.Mean() + values["stddev"] = t.StdDev() + values["median"] = ps[0] + values["75%"] = ps[1] + values["95%"] = ps[2] + values["99%"] = ps[3] + values["99.9%"] = ps[4] + values["1m.rate"] = t.Rate1() + values["5m.rate"] = t.Rate5() + values["15m.rate"] = t.Rate15() + values["mean.rate"] = t.RateMean() + } + data[name] = values + }) + return json.Marshal(data) +} + +// WriteJSON writes metrics from the given registry periodically to the +// specified io.Writer as JSON. +func WriteJSON(r Registry, d time.Duration, w io.Writer) { + for _ = range time.Tick(d) { + WriteJSONOnce(r, w) + } +} + +// WriteJSONOnce writes metrics from the given registry to the specified +// io.Writer as JSON. 
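+//
+// Editor's note: an illustrative sketch, not upstream code, e.g. for a
+// debug endpoint or a one-off dump at shutdown:
+//
+//	metrics.WriteJSONOnce(metrics.DefaultRegistry, os.Stdout)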
+func WriteJSONOnce(r Registry, w io.Writer) { + json.NewEncoder(w).Encode(r) +} + +func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) { + return json.Marshal(p.underlying) +} diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go new file mode 100644 index 0000000..f8074c0 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/log.go @@ -0,0 +1,80 @@ +package metrics + +import ( + "time" +) + +type Logger interface { + Printf(format string, v ...interface{}) +} + +func Log(r Registry, freq time.Duration, l Logger) { + LogScaled(r, freq, time.Nanosecond, l) +} + +// Output each metric in the given registry periodically using the given +// logger. Print timings in `scale` units (eg time.Millisecond) rather than nanos. +func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) { + du := float64(scale) + duSuffix := scale.String()[1:] + + for _ = range time.Tick(freq) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + l.Printf("counter %s\n", name) + l.Printf(" count: %9d\n", metric.Count()) + case Gauge: + l.Printf("gauge %s\n", name) + l.Printf(" value: %9d\n", metric.Value()) + case GaugeFloat64: + l.Printf("gauge %s\n", name) + l.Printf(" value: %f\n", metric.Value()) + case Healthcheck: + metric.Check() + l.Printf("healthcheck %s\n", name) + l.Printf(" error: %v\n", metric.Error()) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("histogram %s\n", name) + l.Printf(" count: %9d\n", h.Count()) + l.Printf(" min: %9d\n", h.Min()) + l.Printf(" max: %9d\n", h.Max()) + l.Printf(" mean: %12.2f\n", h.Mean()) + l.Printf(" stddev: %12.2f\n", h.StdDev()) + l.Printf(" median: %12.2f\n", ps[0]) + l.Printf(" 75%%: %12.2f\n", ps[1]) + l.Printf(" 95%%: %12.2f\n", ps[2]) + l.Printf(" 99%%: %12.2f\n", ps[3]) + l.Printf(" 99.9%%: %12.2f\n", ps[4]) + case Meter: + m := metric.Snapshot() + l.Printf("meter %s\n", name) + l.Printf(" count: %9d\n", m.Count()) + l.Printf(" 1-min rate: %12.2f\n", m.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", m.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", m.Rate15()) + l.Printf(" mean rate: %12.2f\n", m.RateMean()) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + l.Printf("timer %s\n", name) + l.Printf(" count: %9d\n", t.Count()) + l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix) + l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix) + l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix) + l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix) + l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix) + l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix) + l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix) + l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix) + l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix) + l.Printf(" 1-min rate: %12.2f\n", t.Rate1()) + l.Printf(" 5-min rate: %12.2f\n", t.Rate5()) + l.Printf(" 15-min rate: %12.2f\n", t.Rate15()) + l.Printf(" mean rate: %12.2f\n", t.RateMean()) + } + }) + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go new file mode 100644 index 0000000..0389ab0 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/meter.go @@ -0,0 +1,233 @@ +package metrics + +import ( + "sync" + "time" +) + +// Meters count events to produce exponentially-weighted moving average rates +// at one-, five-, and fifteen-minutes and a mean 
rate.
+type Meter interface {
+	Count() int64
+	Mark(int64)
+	Rate1() float64
+	Rate5() float64
+	Rate15() float64
+	RateMean() float64
+	Snapshot() Meter
+}
+
+// GetOrRegisterMeter returns an existing Meter or constructs and registers a
+// new StandardMeter.
+func GetOrRegisterMeter(name string, r Registry) Meter {
+	if nil == r {
+		r = DefaultRegistry
+	}
+	return r.GetOrRegister(name, NewMeter).(Meter)
+}
+
+// NewMeter constructs a new StandardMeter and launches a goroutine.
+func NewMeter() Meter {
+	if UseNilMetrics {
+		return NilMeter{}
+	}
+	m := newStandardMeter()
+	arbiter.Lock()
+	defer arbiter.Unlock()
+	arbiter.meters = append(arbiter.meters, m)
+	if !arbiter.started {
+		arbiter.started = true
+		go arbiter.tick()
+	}
+	return m
+}
+
+// NewRegisteredMeter constructs and registers a new StandardMeter and launches
+// a goroutine.
+func NewRegisteredMeter(name string, r Registry) Meter {
+	c := NewMeter()
+	if nil == r {
+		r = DefaultRegistry
+	}
+	r.Register(name, c)
+	return c
+}
+
+// MeterSnapshot is a read-only copy of another Meter.
+type MeterSnapshot struct {
+	count                          int64
+	rate1, rate5, rate15, rateMean float64
+}
+
+// Count returns the count of events at the time the snapshot was taken.
+func (m *MeterSnapshot) Count() int64 { return m.count }
+
+// Mark panics.
+func (*MeterSnapshot) Mark(n int64) {
+	panic("Mark called on a MeterSnapshot")
+}
+
+// Rate1 returns the one-minute moving average rate of events per second at the
+// time the snapshot was taken.
+func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
+
+// Rate5 returns the five-minute moving average rate of events per second at
+// the time the snapshot was taken.
+func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
+
+// Rate15 returns the fifteen-minute moving average rate of events per second
+// at the time the snapshot was taken.
+func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
+
+// RateMean returns the meter's mean rate of events per second at the time the
+// snapshot was taken.
+func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
+
+// Snapshot returns the snapshot.
+func (m *MeterSnapshot) Snapshot() Meter { return m }
+
+// NilMeter is a no-op Meter.
+type NilMeter struct{}
+
+// Count is a no-op.
+func (NilMeter) Count() int64 { return 0 }
+
+// Mark is a no-op.
+func (NilMeter) Mark(n int64) {}
+
+// Rate1 is a no-op.
+func (NilMeter) Rate1() float64 { return 0.0 }
+
+// Rate5 is a no-op.
+func (NilMeter) Rate5() float64 { return 0.0 }
+
+// Rate15 is a no-op.
+func (NilMeter) Rate15() float64 { return 0.0 }
+
+// RateMean is a no-op.
+func (NilMeter) RateMean() float64 { return 0.0 }
+
+// Snapshot is a no-op.
+func (NilMeter) Snapshot() Meter { return NilMeter{} }
+
+// StandardMeter is the standard implementation of a Meter.
+type StandardMeter struct {
+	lock        sync.RWMutex
+	snapshot    *MeterSnapshot
+	a1, a5, a15 EWMA
+	startTime   time.Time
+}
+
+func newStandardMeter() *StandardMeter {
+	return &StandardMeter{
+		snapshot:  &MeterSnapshot{},
+		a1:        NewEWMA1(),
+		a5:        NewEWMA5(),
+		a15:       NewEWMA15(),
+		startTime: time.Now(),
+	}
+}
+
+// Count returns the number of events recorded.
+func (m *StandardMeter) Count() int64 {
+	m.lock.RLock()
+	count := m.snapshot.count
+	m.lock.RUnlock()
+	return count
+}
+
+// Mark records the occurrence of n events.
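+//
+// Editor's note: an illustrative sketch, not upstream code; the metric
+// name is hypothetical:
+//
+//	m := metrics.NewRegisteredMeter("http.requests", nil)
+//	m.Mark(1)     // one event
+//	_ = m.Rate1() // smoothed events/sec over the last minute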
+func (m *StandardMeter) Mark(n int64) {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	m.snapshot.count += n
+	m.a1.Update(n)
+	m.a5.Update(n)
+	m.a15.Update(n)
+	m.updateSnapshot()
+}
+
+// Rate1 returns the one-minute moving average rate of events per second.
+func (m *StandardMeter) Rate1() float64 {
+	m.lock.RLock()
+	rate1 := m.snapshot.rate1
+	m.lock.RUnlock()
+	return rate1
+}
+
+// Rate5 returns the five-minute moving average rate of events per second.
+func (m *StandardMeter) Rate5() float64 {
+	m.lock.RLock()
+	rate5 := m.snapshot.rate5
+	m.lock.RUnlock()
+	return rate5
+}
+
+// Rate15 returns the fifteen-minute moving average rate of events per second.
+func (m *StandardMeter) Rate15() float64 {
+	m.lock.RLock()
+	rate15 := m.snapshot.rate15
+	m.lock.RUnlock()
+	return rate15
+}
+
+// RateMean returns the meter's mean rate of events per second.
+func (m *StandardMeter) RateMean() float64 {
+	m.lock.RLock()
+	rateMean := m.snapshot.rateMean
+	m.lock.RUnlock()
+	return rateMean
+}
+
+// Snapshot returns a read-only copy of the meter.
+func (m *StandardMeter) Snapshot() Meter {
+	m.lock.RLock()
+	snapshot := *m.snapshot
+	m.lock.RUnlock()
+	return &snapshot
+}
+
+func (m *StandardMeter) updateSnapshot() {
+	// should run with write lock held on m.lock
+	snapshot := m.snapshot
+	snapshot.rate1 = m.a1.Rate()
+	snapshot.rate5 = m.a5.Rate()
+	snapshot.rate15 = m.a15.Rate()
+	snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
+}
+
+func (m *StandardMeter) tick() {
+	m.lock.Lock()
+	defer m.lock.Unlock()
+	m.a1.Tick()
+	m.a5.Tick()
+	m.a15.Tick()
+	m.updateSnapshot()
+}
+
+type meterArbiter struct {
+	sync.RWMutex
+	started bool
+	meters  []*StandardMeter
+	ticker  *time.Ticker
+}
+
+var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
+
+// Ticks meters on the scheduled interval
+func (ma *meterArbiter) tick() {
+	for {
+		select {
+		case <-ma.ticker.C:
+			ma.tickMeters()
+		}
+	}
+}
+
+func (ma *meterArbiter) tickMeters() {
+	ma.RLock()
+	defer ma.RUnlock()
+	for _, meter := range ma.meters {
+		meter.tick()
+	}
+}
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
new file mode 100644
index 0000000..b97a49e
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/metrics.go
@@ -0,0 +1,13 @@
+// Go port of Coda Hale's Metrics library
+//
+// <https://github.com/rcrowley/go-metrics>
+//
+// Coda Hale's original work: <https://github.com/codahale/metrics>
+package metrics
+
+// UseNilMetrics is checked by the constructor functions for all of the
+// standard metrics. If it is true, the metric returned is a stub.
+//
+// This global kill-switch helps quantify the observer effect and makes
+// for less cluttered pprof profiles.
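+//
+// Editor's note: an illustrative sketch, not upstream code; the flag must
+// be set before any metric is constructed for it to take effect:
+//
+//	metrics.UseNilMetrics = true
+//	m := metrics.NewMeter() // returns the no-op NilMeter{}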
+var UseNilMetrics bool = false
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
new file mode 100644
index 0000000..266b6c9
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
@@ -0,0 +1,119 @@
+package metrics
+
+import (
+	"bufio"
+	"fmt"
+	"log"
+	"net"
+	"os"
+	"strings"
+	"time"
+)
+
+var shortHostName string = ""
+
+// OpenTSDBConfig provides a container with configuration parameters for
+// the OpenTSDB exporter
+type OpenTSDBConfig struct {
+	Addr          *net.TCPAddr  // Network address to connect to
+	Registry      Registry      // Registry to be exported
+	FlushInterval time.Duration // Flush interval
+	DurationUnit  time.Duration // Time conversion unit for durations
+	Prefix        string        // Prefix to be prepended to metric names
+}
+
+// OpenTSDB is a blocking exporter function which reports metrics in r
+// to a TSDB server located at addr, flushing them every d duration
+// and prepending metric names with prefix.
+func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
+	OpenTSDBWithConfig(OpenTSDBConfig{
+		Addr:          addr,
+		Registry:      r,
+		FlushInterval: d,
+		DurationUnit:  time.Nanosecond,
+		Prefix:        prefix,
+	})
+}
+
+// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
+// but it takes an OpenTSDBConfig instead.
+func OpenTSDBWithConfig(c OpenTSDBConfig) {
+	for _ = range time.Tick(c.FlushInterval) {
+		if err := openTSDB(&c); nil != err {
+			log.Println(err)
+		}
+	}
+}
+
+func getShortHostname() string {
+	if shortHostName == "" {
+		host, _ := os.Hostname()
+		if index := strings.Index(host, "."); index > 0 {
+			shortHostName = host[:index]
+		} else {
+			shortHostName = host
+		}
+	}
+	return shortHostName
+}
+
+func openTSDB(c *OpenTSDBConfig) error {
+	shortHostname := getShortHostname()
+	now := time.Now().Unix()
+	du := float64(c.DurationUnit)
+	conn, err := net.DialTCP("tcp", nil, c.Addr)
+	if nil != err {
+		return err
+	}
+	defer conn.Close()
+	w := bufio.NewWriter(conn)
+	c.Registry.Each(func(name string, i interface{}) {
+		switch metric := i.(type) {
+		case Counter:
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
+		case Gauge:
+			fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case GaugeFloat64:
+			fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
+			fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
+			fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n",
c.Prefix, name, now, ps[4], shortHostname) + case Meter: + m := metric.Snapshot() + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname) + fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname) + fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname) + fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname) + fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname) + fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname) + fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname) + } + w.Flush() + }) + return nil +} diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go new file mode 100644 index 0000000..2bb7a1e --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/registry.go @@ -0,0 +1,270 @@ +package metrics + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// DuplicateMetric is the error returned by Registry.Register when a metric +// already exists. If you mean to Register that metric you must first +// Unregister the existing metric. +type DuplicateMetric string + +func (err DuplicateMetric) Error() string { + return fmt.Sprintf("duplicate metric: %s", string(err)) +} + +// A Registry holds references to a set of metrics by name and can iterate +// over them, calling callback functions provided by the user. +// +// This is an interface so as to encourage other structs to implement +// the Registry API as appropriate. +type Registry interface { + + // Call the given function for each registered metric. + Each(func(string, interface{})) + + // Get the metric by the given name or nil if none is registered. + Get(string) interface{} + + // Gets an existing metric or registers the given one. + // The interface can be the metric to register if not found in registry, + // or a function returning the metric for lazy instantiation. 
+ GetOrRegister(string, interface{}) interface{} + + // Register the given metric under the given name. + Register(string, interface{}) error + + // Run all registered healthchecks. + RunHealthchecks() + + // Unregister the metric with the given name. + Unregister(string) + + // Unregister all metrics. (Mostly for testing.) + UnregisterAll() +} + +// The standard implementation of a Registry is a mutex-protected map +// of names to metrics. +type StandardRegistry struct { + metrics map[string]interface{} + mutex sync.Mutex +} + +// Create a new registry. +func NewRegistry() Registry { + return &StandardRegistry{metrics: make(map[string]interface{})} +} + +// Call the given function for each registered metric. +func (r *StandardRegistry) Each(f func(string, interface{})) { + for name, i := range r.registered() { + f(name, i) + } +} + +// Get the metric by the given name or nil if none is registered. +func (r *StandardRegistry) Get(name string) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.metrics[name] +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + if metric, ok := r.metrics[name]; ok { + return metric + } + if v := reflect.ValueOf(i); v.Kind() == reflect.Func { + i = v.Call(nil)[0].Interface() + } + r.register(name, i) + return i +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func (r *StandardRegistry) Register(name string, i interface{}) error { + r.mutex.Lock() + defer r.mutex.Unlock() + return r.register(name, i) +} + +// Run all registered healthchecks. +func (r *StandardRegistry) RunHealthchecks() { + r.mutex.Lock() + defer r.mutex.Unlock() + for _, i := range r.metrics { + if h, ok := i.(Healthcheck); ok { + h.Check() + } + } +} + +// Unregister the metric with the given name. +func (r *StandardRegistry) Unregister(name string) { + r.mutex.Lock() + defer r.mutex.Unlock() + delete(r.metrics, name) +} + +// Unregister all metrics. (Mostly for testing.) +func (r *StandardRegistry) UnregisterAll() { + r.mutex.Lock() + defer r.mutex.Unlock() + for name, _ := range r.metrics { + delete(r.metrics, name) + } +} + +func (r *StandardRegistry) register(name string, i interface{}) error { + if _, ok := r.metrics[name]; ok { + return DuplicateMetric(name) + } + switch i.(type) { + case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer: + r.metrics[name] = i + } + return nil +} + +func (r *StandardRegistry) registered() map[string]interface{} { + r.mutex.Lock() + defer r.mutex.Unlock() + metrics := make(map[string]interface{}, len(r.metrics)) + for name, i := range r.metrics { + metrics[name] = i + } + return metrics +} + +type PrefixedRegistry struct { + underlying Registry + prefix string +} + +func NewPrefixedRegistry(prefix string) Registry { + return &PrefixedRegistry{ + underlying: NewRegistry(), + prefix: prefix, + } +} + +func NewPrefixedChildRegistry(parent Registry, prefix string) Registry { + return &PrefixedRegistry{ + underlying: parent, + prefix: prefix, + } +} + +// Call the given function for each registered metric. 
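+//
+// Editor's note: an illustrative sketch, not upstream code; the callback
+// sees fully-prefixed names (the prefix and metric name here are
+// hypothetical):
+//
+//	pr := metrics.NewPrefixedRegistry("web.")
+//	metrics.GetOrRegisterGauge("inflight", pr)
+//	pr.Each(func(name string, i interface{}) {
+//		fmt.Println(name) // "web.inflight"
+//	})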
+func (r *PrefixedRegistry) Each(fn func(string, interface{})) { + wrappedFn := func(prefix string) func(string, interface{}) { + return func(name string, iface interface{}) { + if strings.HasPrefix(name, prefix) { + fn(name, iface) + } else { + return + } + } + } + + baseRegistry, prefix := findPrefix(r, "") + baseRegistry.Each(wrappedFn(prefix)) +} + +func findPrefix(registry Registry, prefix string) (Registry, string) { + switch r := registry.(type) { + case *PrefixedRegistry: + return findPrefix(r.underlying, r.prefix+prefix) + case *StandardRegistry: + return r, prefix + } + return nil, "" +} + +// Get the metric by the given name or nil if none is registered. +func (r *PrefixedRegistry) Get(name string) interface{} { + realName := r.prefix + name + return r.underlying.Get(realName) +} + +// Gets an existing metric or registers the given one. +// The interface can be the metric to register if not found in registry, +// or a function returning the metric for lazy instantiation. +func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} { + realName := r.prefix + name + return r.underlying.GetOrRegister(realName, metric) +} + +// Register the given metric under the given name. The name will be prefixed. +func (r *PrefixedRegistry) Register(name string, metric interface{}) error { + realName := r.prefix + name + return r.underlying.Register(realName, metric) +} + +// Run all registered healthchecks. +func (r *PrefixedRegistry) RunHealthchecks() { + r.underlying.RunHealthchecks() +} + +// Unregister the metric with the given name. The name will be prefixed. +func (r *PrefixedRegistry) Unregister(name string) { + realName := r.prefix + name + r.underlying.Unregister(realName) +} + +// Unregister all metrics. (Mostly for testing.) +func (r *PrefixedRegistry) UnregisterAll() { + r.underlying.UnregisterAll() +} + +var DefaultRegistry Registry = NewRegistry() + +// Call the given function for each registered metric. +func Each(f func(string, interface{})) { + DefaultRegistry.Each(f) +} + +// Get the metric by the given name or nil if none is registered. +func Get(name string) interface{} { + return DefaultRegistry.Get(name) +} + +// Gets an existing metric or creates and registers a new one. Threadsafe +// alternative to calling Get and Register on failure. +func GetOrRegister(name string, i interface{}) interface{} { + return DefaultRegistry.GetOrRegister(name, i) +} + +// Register the given metric under the given name. Returns a DuplicateMetric +// if a metric by the given name is already registered. +func Register(name string, i interface{}) error { + return DefaultRegistry.Register(name, i) +} + +// Register the given metric under the given name. Panics if a metric by the +// given name is already registered. +func MustRegister(name string, i interface{}) { + if err := Register(name, i); err != nil { + panic(err) + } +} + +// Run all registered healthchecks. +func RunHealthchecks() { + DefaultRegistry.RunHealthchecks() +} + +// Unregister the metric with the given name. 
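+//
+// Editor's note: an illustrative sketch, not upstream code; the metric
+// name is hypothetical:
+//
+//	if err := metrics.Register("jobs.active", metrics.NewGauge()); err != nil {
+//		// a metric named "jobs.active" already exists
+//	}
+//	metrics.Unregister("jobs.active")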
+func Unregister(name string) { + DefaultRegistry.Unregister(name) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go new file mode 100644 index 0000000..11c6b78 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime.go @@ -0,0 +1,212 @@ +package metrics + +import ( + "runtime" + "runtime/pprof" + "time" +) + +var ( + memStats runtime.MemStats + runtimeMetrics struct { + MemStats struct { + Alloc Gauge + BuckHashSys Gauge + DebugGC Gauge + EnableGC Gauge + Frees Gauge + HeapAlloc Gauge + HeapIdle Gauge + HeapInuse Gauge + HeapObjects Gauge + HeapReleased Gauge + HeapSys Gauge + LastGC Gauge + Lookups Gauge + Mallocs Gauge + MCacheInuse Gauge + MCacheSys Gauge + MSpanInuse Gauge + MSpanSys Gauge + NextGC Gauge + NumGC Gauge + GCCPUFraction GaugeFloat64 + PauseNs Histogram + PauseTotalNs Gauge + StackInuse Gauge + StackSys Gauge + Sys Gauge + TotalAlloc Gauge + } + NumCgoCall Gauge + NumGoroutine Gauge + NumThread Gauge + ReadMemStats Timer + } + frees uint64 + lookups uint64 + mallocs uint64 + numGC uint32 + numCgoCalls int64 + + threadCreateProfile = pprof.Lookup("threadcreate") +) + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called as a goroutine. +func CaptureRuntimeMemStats(r Registry, d time.Duration) { + for _ = range time.Tick(d) { + CaptureRuntimeMemStatsOnce(r) + } +} + +// Capture new values for the Go runtime statistics exported in +// runtime.MemStats. This is designed to be called in a background +// goroutine. Giving a registry which has not been given to +// RegisterRuntimeMemStats will panic. +// +// Be very careful with this because runtime.ReadMemStats calls the C +// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld() +// and that last one does what it says on the tin. +func CaptureRuntimeMemStatsOnce(r Registry) { + t := time.Now() + runtime.ReadMemStats(&memStats) // This takes 50-200us. 
+ runtimeMetrics.ReadMemStats.UpdateSince(t) + + runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc)) + runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys)) + if memStats.DebugGC { + runtimeMetrics.MemStats.DebugGC.Update(1) + } else { + runtimeMetrics.MemStats.DebugGC.Update(0) + } + if memStats.EnableGC { + runtimeMetrics.MemStats.EnableGC.Update(1) + } else { + runtimeMetrics.MemStats.EnableGC.Update(0) + } + + runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees)) + runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc)) + runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle)) + runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse)) + runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects)) + runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased)) + runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys)) + runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC)) + runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups)) + runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs)) + runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse)) + runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys)) + runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse)) + runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys)) + runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC)) + runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC)) + runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats)) + + // + i := numGC % uint32(len(memStats.PauseNs)) + ii := memStats.NumGC % uint32(len(memStats.PauseNs)) + if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) { + for i = 0; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } else { + if i > ii { + for ; i < uint32(len(memStats.PauseNs)); i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + i = 0 + } + for ; i < ii; i++ { + runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i])) + } + } + frees = memStats.Frees + lookups = memStats.Lookups + mallocs = memStats.Mallocs + numGC = memStats.NumGC + + runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs)) + runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse)) + runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys)) + runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys)) + runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc)) + + currentNumCgoCalls := numCgoCall() + runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls) + numCgoCalls = currentNumCgoCalls + + runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine())) + + runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count())) +} + +// Register runtimeMetrics for the Go runtime statistics exported in runtime and +// specifically runtime.MemStats. The runtimeMetrics are named by their +// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc. 
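+//
+// Editor's note: an illustrative sketch, not upstream code; register once,
+// then capture periodically in the background (the 5s interval is an
+// arbitrary choice):
+//
+//	metrics.RegisterRuntimeMemStats(metrics.DefaultRegistry)
+//	go metrics.CaptureRuntimeMemStats(metrics.DefaultRegistry, 5*time.Second)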
+func RegisterRuntimeMemStats(r Registry) { + runtimeMetrics.MemStats.Alloc = NewGauge() + runtimeMetrics.MemStats.BuckHashSys = NewGauge() + runtimeMetrics.MemStats.DebugGC = NewGauge() + runtimeMetrics.MemStats.EnableGC = NewGauge() + runtimeMetrics.MemStats.Frees = NewGauge() + runtimeMetrics.MemStats.HeapAlloc = NewGauge() + runtimeMetrics.MemStats.HeapIdle = NewGauge() + runtimeMetrics.MemStats.HeapInuse = NewGauge() + runtimeMetrics.MemStats.HeapObjects = NewGauge() + runtimeMetrics.MemStats.HeapReleased = NewGauge() + runtimeMetrics.MemStats.HeapSys = NewGauge() + runtimeMetrics.MemStats.LastGC = NewGauge() + runtimeMetrics.MemStats.Lookups = NewGauge() + runtimeMetrics.MemStats.Mallocs = NewGauge() + runtimeMetrics.MemStats.MCacheInuse = NewGauge() + runtimeMetrics.MemStats.MCacheSys = NewGauge() + runtimeMetrics.MemStats.MSpanInuse = NewGauge() + runtimeMetrics.MemStats.MSpanSys = NewGauge() + runtimeMetrics.MemStats.NextGC = NewGauge() + runtimeMetrics.MemStats.NumGC = NewGauge() + runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64() + runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015)) + runtimeMetrics.MemStats.PauseTotalNs = NewGauge() + runtimeMetrics.MemStats.StackInuse = NewGauge() + runtimeMetrics.MemStats.StackSys = NewGauge() + runtimeMetrics.MemStats.Sys = NewGauge() + runtimeMetrics.MemStats.TotalAlloc = NewGauge() + runtimeMetrics.NumCgoCall = NewGauge() + runtimeMetrics.NumGoroutine = NewGauge() + runtimeMetrics.NumThread = NewGauge() + runtimeMetrics.ReadMemStats = NewTimer() + + r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc) + r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys) + r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC) + r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC) + r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees) + r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc) + r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle) + r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse) + r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects) + r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased) + r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys) + r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC) + r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups) + r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs) + r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse) + r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys) + r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse) + r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys) + r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC) + r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC) + r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction) + r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs) + r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs) + r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse) + r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys) + r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys) + 
r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc) + r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall) + r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine) + r.Register("runtime.NumThread", runtimeMetrics.NumThread) + r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats) +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go new file mode 100644 index 0000000..e3391f4 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go @@ -0,0 +1,10 @@ +// +build cgo +// +build !appengine + +package metrics + +import "runtime" + +func numCgoCall() int64 { + return runtime.NumCgoCall() +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go new file mode 100644 index 0000000..ca12c05 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go @@ -0,0 +1,9 @@ +// +build go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return memStats.GCCPUFraction +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go new file mode 100644 index 0000000..616a3b4 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go @@ -0,0 +1,7 @@ +// +build !cgo appengine + +package metrics + +func numCgoCall() int64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go new file mode 100644 index 0000000..be96aa6 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go @@ -0,0 +1,9 @@ +// +build !go1.5 + +package metrics + +import "runtime" + +func gcCPUFraction(memStats *runtime.MemStats) float64 { + return 0 +} diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go new file mode 100644 index 0000000..fecee5e --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/sample.go @@ -0,0 +1,616 @@ +package metrics + +import ( + "math" + "math/rand" + "sort" + "sync" + "time" +) + +const rescaleThreshold = time.Hour + +// Samples maintain a statistically-significant selection of values from +// a stream. +type Sample interface { + Clear() + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Size() int + Snapshot() Sample + StdDev() float64 + Sum() int64 + Update(int64) + Values() []int64 + Variance() float64 +} + +// ExpDecaySample is an exponentially-decaying sample using a forward-decaying +// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time +// Decay Model for Streaming Systems". +// +// +type ExpDecaySample struct { + alpha float64 + count int64 + mutex sync.Mutex + reservoirSize int + t0, t1 time.Time + values *expDecaySampleHeap +} + +// NewExpDecaySample constructs a new exponentially-decaying sample with the +// given reservoir size and alpha. +func NewExpDecaySample(reservoirSize int, alpha float64) Sample { + if UseNilMetrics { + return NilSample{} + } + s := &ExpDecaySample{ + alpha: alpha, + reservoirSize: reservoirSize, + t0: time.Now(), + values: newExpDecaySampleHeap(reservoirSize), + } + s.t1 = s.t0.Add(rescaleThreshold) + return s +} + +// Clear clears all samples. 
+func (s *ExpDecaySample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.t0 = time.Now() + s.t1 = s.t0.Add(rescaleThreshold) + s.values.Clear() +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *ExpDecaySample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *ExpDecaySample) Max() int64 { + return SampleMax(s.Values()) +} + +// Mean returns the mean of the values in the sample. +func (s *ExpDecaySample) Mean() float64 { + return SampleMean(s.Values()) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *ExpDecaySample) Min() int64 { + return SampleMin(s.Values()) +} + +// Percentile returns an arbitrary percentile of values in the sample. +func (s *ExpDecaySample) Percentile(p float64) float64 { + return SamplePercentile(s.Values(), p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *ExpDecaySample) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.Values(), ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *ExpDecaySample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.values.Size() +} + +// Snapshot returns a read-only copy of the sample. +func (s *ExpDecaySample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *ExpDecaySample) StdDev() float64 { + return SampleStdDev(s.Values()) +} + +// Sum returns the sum of the values in the sample. +func (s *ExpDecaySample) Sum() int64 { + return SampleSum(s.Values()) +} + +// Update samples a new value. +func (s *ExpDecaySample) Update(v int64) { + s.update(time.Now(), v) +} + +// Values returns a copy of the values in the sample. +func (s *ExpDecaySample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + vals := s.values.Values() + values := make([]int64, len(vals)) + for i, v := range vals { + values[i] = v.v + } + return values +} + +// Variance returns the variance of the values in the sample. +func (s *ExpDecaySample) Variance() float64 { + return SampleVariance(s.Values()) +} + +// update samples a new value at a particular timestamp. This is a method all +// its own to facilitate testing. +func (s *ExpDecaySample) update(t time.Time, v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if s.values.Size() == s.reservoirSize { + s.values.Pop() + } + s.values.Push(expDecaySample{ + k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(), + v: v, + }) + if t.After(s.t1) { + values := s.values.Values() + t0 := s.t0 + s.values.Clear() + s.t0 = t + s.t1 = s.t0.Add(rescaleThreshold) + for _, v := range values { + v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds()) + s.values.Push(v) + } + } +} + +// NilSample is a no-op Sample. +type NilSample struct{} + +// Clear is a no-op. +func (NilSample) Clear() {} + +// Count is a no-op. +func (NilSample) Count() int64 { return 0 } + +// Max is a no-op. +func (NilSample) Max() int64 { return 0 } + +// Mean is a no-op. 
+func (NilSample) Mean() float64 { return 0.0 }
+
+// Min is a no-op.
+func (NilSample) Min() int64 { return 0 }
+
+// Percentile is a no-op.
+func (NilSample) Percentile(p float64) float64 { return 0.0 }
+
+// Percentiles is a no-op.
+func (NilSample) Percentiles(ps []float64) []float64 {
+	return make([]float64, len(ps))
+}
+
+// Size is a no-op.
+func (NilSample) Size() int { return 0 }
+
+// Snapshot is a no-op.
+func (NilSample) Snapshot() Sample { return NilSample{} }
+
+// StdDev is a no-op.
+func (NilSample) StdDev() float64 { return 0.0 }
+
+// Sum is a no-op.
+func (NilSample) Sum() int64 { return 0 }
+
+// Update is a no-op.
+func (NilSample) Update(v int64) {}
+
+// Values is a no-op.
+func (NilSample) Values() []int64 { return []int64{} }
+
+// Variance is a no-op.
+func (NilSample) Variance() float64 { return 0.0 }
+
+// SampleMax returns the maximum value of the slice of int64.
+func SampleMax(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var max int64 = math.MinInt64
+	for _, v := range values {
+		if max < v {
+			max = v
+		}
+	}
+	return max
+}
+
+// SampleMean returns the mean value of the slice of int64.
+func SampleMean(values []int64) float64 {
+	if 0 == len(values) {
+		return 0.0
+	}
+	return float64(SampleSum(values)) / float64(len(values))
+}
+
+// SampleMin returns the minimum value of the slice of int64.
+func SampleMin(values []int64) int64 {
+	if 0 == len(values) {
+		return 0
+	}
+	var min int64 = math.MaxInt64
+	for _, v := range values {
+		if min > v {
+			min = v
+		}
+	}
+	return min
+}
+
+// SamplePercentile returns an arbitrary percentile of the slice of int64.
+func SamplePercentile(values int64Slice, p float64) float64 {
+	return SamplePercentiles(values, []float64{p})[0]
+}
+
+// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
+// int64.
+func SamplePercentiles(values int64Slice, ps []float64) []float64 {
+	scores := make([]float64, len(ps))
+	size := len(values)
+	if size > 0 {
+		sort.Sort(values)
+		for i, p := range ps {
+			pos := p * float64(size+1)
+			if pos < 1.0 {
+				scores[i] = float64(values[0])
+			} else if pos >= float64(size) {
+				scores[i] = float64(values[size-1])
+			} else {
+				lower := float64(values[int(pos)-1])
+				upper := float64(values[int(pos)])
+				scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
+			}
+		}
+	}
+	return scores
+}
+
+// SampleSnapshot is a read-only copy of another Sample.
+type SampleSnapshot struct {
+	count  int64
+	values []int64
+}
+
+// NewSampleSnapshot constructs a snapshot from an observation count and a
+// slice of values.
+func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
+	return &SampleSnapshot{
+		count:  count,
+		values: values,
+	}
+}
+
+// Clear panics.
+func (*SampleSnapshot) Clear() {
+	panic("Clear called on a SampleSnapshot")
+}
+
+// Count returns the count of inputs at the time the snapshot was taken.
+func (s *SampleSnapshot) Count() int64 { return s.count }
+
+// Max returns the maximal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
+
+// Mean returns the mean value at the time the snapshot was taken.
+func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
+
+// Min returns the minimal value at the time the snapshot was taken.
+func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
+
+// Percentile returns an arbitrary percentile of values at the time the
+// snapshot was taken.
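+// The interpolation follows SamplePercentiles above: for example, with sorted
+// values {10, 20, 30, 40} and p = 0.5, pos = 0.5*(4+1) = 2.5, so the result
+// interpolates halfway between values[1] and values[2], giving 25.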
+func (s *SampleSnapshot) Percentile(p float64) float64 { + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values at the time +// the snapshot was taken. +func (s *SampleSnapshot) Percentiles(ps []float64) []float64 { + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample at the time the snapshot was taken. +func (s *SampleSnapshot) Size() int { return len(s.values) } + +// Snapshot returns the snapshot. +func (s *SampleSnapshot) Snapshot() Sample { return s } + +// StdDev returns the standard deviation of values at the time the snapshot was +// taken. +func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) } + +// Sum returns the sum of values at the time the snapshot was taken. +func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) } + +// Update panics. +func (*SampleSnapshot) Update(int64) { + panic("Update called on a SampleSnapshot") +} + +// Values returns a copy of the values in the sample. +func (s *SampleSnapshot) Values() []int64 { + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of values at the time the snapshot was taken. +func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) } + +// SampleStdDev returns the standard deviation of the slice of int64. +func SampleStdDev(values []int64) float64 { + return math.Sqrt(SampleVariance(values)) +} + +// SampleSum returns the sum of the slice of int64. +func SampleSum(values []int64) int64 { + var sum int64 + for _, v := range values { + sum += v + } + return sum +} + +// SampleVariance returns the variance of the slice of int64. +func SampleVariance(values []int64) float64 { + if 0 == len(values) { + return 0.0 + } + m := SampleMean(values) + var sum float64 + for _, v := range values { + d := float64(v) - m + sum += d * d + } + return sum / float64(len(values)) +} + +// A uniform sample using Vitter's Algorithm R. +// +// +type UniformSample struct { + count int64 + mutex sync.Mutex + reservoirSize int + values []int64 +} + +// NewUniformSample constructs a new uniform sample with the given reservoir +// size. +func NewUniformSample(reservoirSize int) Sample { + if UseNilMetrics { + return NilSample{} + } + return &UniformSample{ + reservoirSize: reservoirSize, + values: make([]int64, 0, reservoirSize), + } +} + +// Clear clears all samples. +func (s *UniformSample) Clear() { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count = 0 + s.values = make([]int64, 0, s.reservoirSize) +} + +// Count returns the number of samples recorded, which may exceed the +// reservoir size. +func (s *UniformSample) Count() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return s.count +} + +// Max returns the maximum value in the sample, which may not be the maximum +// value ever to be part of the sample. +func (s *UniformSample) Max() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMax(s.values) +} + +// Mean returns the mean of the values in the sample. +func (s *UniformSample) Mean() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMean(s.values) +} + +// Min returns the minimum value in the sample, which may not be the minimum +// value ever to be part of the sample. +func (s *UniformSample) Min() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleMin(s.values) +} + +// Percentile returns an arbitrary percentile of values in the sample. 
+func (s *UniformSample) Percentile(p float64) float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentile(s.values, p) +} + +// Percentiles returns a slice of arbitrary percentiles of values in the +// sample. +func (s *UniformSample) Percentiles(ps []float64) []float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SamplePercentiles(s.values, ps) +} + +// Size returns the size of the sample, which is at most the reservoir size. +func (s *UniformSample) Size() int { + s.mutex.Lock() + defer s.mutex.Unlock() + return len(s.values) +} + +// Snapshot returns a read-only copy of the sample. +func (s *UniformSample) Snapshot() Sample { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return &SampleSnapshot{ + count: s.count, + values: values, + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (s *UniformSample) StdDev() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleStdDev(s.values) +} + +// Sum returns the sum of the values in the sample. +func (s *UniformSample) Sum() int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleSum(s.values) +} + +// Update samples a new value. +func (s *UniformSample) Update(v int64) { + s.mutex.Lock() + defer s.mutex.Unlock() + s.count++ + if len(s.values) < s.reservoirSize { + s.values = append(s.values, v) + } else { + r := rand.Int63n(s.count) + if r < int64(len(s.values)) { + s.values[int(r)] = v + } + } +} + +// Values returns a copy of the values in the sample. +func (s *UniformSample) Values() []int64 { + s.mutex.Lock() + defer s.mutex.Unlock() + values := make([]int64, len(s.values)) + copy(values, s.values) + return values +} + +// Variance returns the variance of the values in the sample. +func (s *UniformSample) Variance() float64 { + s.mutex.Lock() + defer s.mutex.Unlock() + return SampleVariance(s.values) +} + +// expDecaySample represents an individual sample in a heap. +type expDecaySample struct { + k float64 + v int64 +} + +func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap { + return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)} +} + +// expDecaySampleHeap is a min-heap of expDecaySamples. 
+// The internal implementation is copied from the standard library's container/heap +type expDecaySampleHeap struct { + s []expDecaySample +} + +func (h *expDecaySampleHeap) Clear() { + h.s = h.s[:0] +} + +func (h *expDecaySampleHeap) Push(s expDecaySample) { + n := len(h.s) + h.s = h.s[0 : n+1] + h.s[n] = s + h.up(n) +} + +func (h *expDecaySampleHeap) Pop() expDecaySample { + n := len(h.s) - 1 + h.s[0], h.s[n] = h.s[n], h.s[0] + h.down(0, n) + + n = len(h.s) + s := h.s[n-1] + h.s = h.s[0 : n-1] + return s +} + +func (h *expDecaySampleHeap) Size() int { + return len(h.s) +} + +func (h *expDecaySampleHeap) Values() []expDecaySample { + return h.s +} + +func (h *expDecaySampleHeap) up(j int) { + for { + i := (j - 1) / 2 // parent + if i == j || !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + j = i + } +} + +func (h *expDecaySampleHeap) down(i, n int) { + for { + j1 := 2*i + 1 + if j1 >= n || j1 < 0 { // j1 < 0 after int overflow + break + } + j := j1 // left child + if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) { + j = j2 // = 2*i + 2 // right child + } + if !(h.s[j].k < h.s[i].k) { + break + } + h.s[i], h.s[j] = h.s[j], h.s[i] + i = j + } +} + +type int64Slice []int64 + +func (p int64Slice) Len() int { return len(p) } +func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go new file mode 100644 index 0000000..693f190 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/syslog.go @@ -0,0 +1,78 @@ +// +build !windows + +package metrics + +import ( + "fmt" + "log/syslog" + "time" +) + +// Output each metric in the given registry to syslog periodically using +// the given syslogger. 
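+//
+// Syslog blocks forever on time.Tick, so callers typically run it in its own
+// goroutine; a sketch (the syslog priority and tag are arbitrary examples):
+//
+//	w, err := syslog.New(syslog.LOG_INFO, "mybot")
+//	if err != nil {
+//		log.Fatal(err)
+//	}
+//	go Syslog(DefaultRegistry, 60*time.Second, w)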
+func Syslog(r Registry, d time.Duration, w *syslog.Writer) { + for _ = range time.Tick(d) { + r.Each(func(name string, i interface{}) { + switch metric := i.(type) { + case Counter: + w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count())) + case Gauge: + w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value())) + case GaugeFloat64: + w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value())) + case Healthcheck: + metric.Check() + w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error())) + case Histogram: + h := metric.Snapshot() + ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f", + name, + h.Count(), + h.Min(), + h.Max(), + h.Mean(), + h.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + )) + case Meter: + m := metric.Snapshot() + w.Info(fmt.Sprintf( + "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f", + name, + m.Count(), + m.Rate1(), + m.Rate5(), + m.Rate15(), + m.RateMean(), + )) + case Timer: + t := metric.Snapshot() + ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999}) + w.Info(fmt.Sprintf( + "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f", + name, + t.Count(), + t.Min(), + t.Max(), + t.Mean(), + t.StdDev(), + ps[0], + ps[1], + ps[2], + ps[3], + ps[4], + t.Rate1(), + t.Rate5(), + t.Rate15(), + t.RateMean(), + )) + } + }) + } +} diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go new file mode 100644 index 0000000..17db8f8 --- /dev/null +++ b/vendor/github.com/rcrowley/go-metrics/timer.go @@ -0,0 +1,311 @@ +package metrics + +import ( + "sync" + "time" +) + +// Timers capture the duration and rate of events. +type Timer interface { + Count() int64 + Max() int64 + Mean() float64 + Min() int64 + Percentile(float64) float64 + Percentiles([]float64) []float64 + Rate1() float64 + Rate5() float64 + Rate15() float64 + RateMean() float64 + Snapshot() Timer + StdDev() float64 + Sum() int64 + Time(func()) + Update(time.Duration) + UpdateSince(time.Time) + Variance() float64 +} + +// GetOrRegisterTimer returns an existing Timer or constructs and registers a +// new StandardTimer. +func GetOrRegisterTimer(name string, r Registry) Timer { + if nil == r { + r = DefaultRegistry + } + return r.GetOrRegister(name, NewTimer).(Timer) +} + +// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter. +func NewCustomTimer(h Histogram, m Meter) Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: h, + meter: m, + } +} + +// NewRegisteredTimer constructs and registers a new StandardTimer. +func NewRegisteredTimer(name string, r Registry) Timer { + c := NewTimer() + if nil == r { + r = DefaultRegistry + } + r.Register(name, c) + return c +} + +// NewTimer constructs a new StandardTimer using an exponentially-decaying +// sample with the same reservoir size and alpha as UNIX load averages. +func NewTimer() Timer { + if UseNilMetrics { + return NilTimer{} + } + return &StandardTimer{ + histogram: NewHistogram(NewExpDecaySample(1028, 0.015)), + meter: NewMeter(), + } +} + +// NilTimer is a no-op Timer. +type NilTimer struct { + h Histogram + m Meter +} + +// Count is a no-op. +func (NilTimer) Count() int64 { return 0 } + +// Max is a no-op. 
+func (NilTimer) Max() int64 { return 0 } + +// Mean is a no-op. +func (NilTimer) Mean() float64 { return 0.0 } + +// Min is a no-op. +func (NilTimer) Min() int64 { return 0 } + +// Percentile is a no-op. +func (NilTimer) Percentile(p float64) float64 { return 0.0 } + +// Percentiles is a no-op. +func (NilTimer) Percentiles(ps []float64) []float64 { + return make([]float64, len(ps)) +} + +// Rate1 is a no-op. +func (NilTimer) Rate1() float64 { return 0.0 } + +// Rate5 is a no-op. +func (NilTimer) Rate5() float64 { return 0.0 } + +// Rate15 is a no-op. +func (NilTimer) Rate15() float64 { return 0.0 } + +// RateMean is a no-op. +func (NilTimer) RateMean() float64 { return 0.0 } + +// Snapshot is a no-op. +func (NilTimer) Snapshot() Timer { return NilTimer{} } + +// StdDev is a no-op. +func (NilTimer) StdDev() float64 { return 0.0 } + +// Sum is a no-op. +func (NilTimer) Sum() int64 { return 0 } + +// Time is a no-op. +func (NilTimer) Time(func()) {} + +// Update is a no-op. +func (NilTimer) Update(time.Duration) {} + +// UpdateSince is a no-op. +func (NilTimer) UpdateSince(time.Time) {} + +// Variance is a no-op. +func (NilTimer) Variance() float64 { return 0.0 } + +// StandardTimer is the standard implementation of a Timer and uses a Histogram +// and Meter. +type StandardTimer struct { + histogram Histogram + meter Meter + mutex sync.Mutex +} + +// Count returns the number of events recorded. +func (t *StandardTimer) Count() int64 { + return t.histogram.Count() +} + +// Max returns the maximum value in the sample. +func (t *StandardTimer) Max() int64 { + return t.histogram.Max() +} + +// Mean returns the mean of the values in the sample. +func (t *StandardTimer) Mean() float64 { + return t.histogram.Mean() +} + +// Min returns the minimum value in the sample. +func (t *StandardTimer) Min() int64 { + return t.histogram.Min() +} + +// Percentile returns an arbitrary percentile of the values in the sample. +func (t *StandardTimer) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of the values in the +// sample. +func (t *StandardTimer) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second. +func (t *StandardTimer) Rate1() float64 { + return t.meter.Rate1() +} + +// Rate5 returns the five-minute moving average rate of events per second. +func (t *StandardTimer) Rate5() float64 { + return t.meter.Rate5() +} + +// Rate15 returns the fifteen-minute moving average rate of events per second. +func (t *StandardTimer) Rate15() float64 { + return t.meter.Rate15() +} + +// RateMean returns the meter's mean rate of events per second. +func (t *StandardTimer) RateMean() float64 { + return t.meter.RateMean() +} + +// Snapshot returns a read-only copy of the timer. +func (t *StandardTimer) Snapshot() Timer { + t.mutex.Lock() + defer t.mutex.Unlock() + return &TimerSnapshot{ + histogram: t.histogram.Snapshot().(*HistogramSnapshot), + meter: t.meter.Snapshot().(*MeterSnapshot), + } +} + +// StdDev returns the standard deviation of the values in the sample. +func (t *StandardTimer) StdDev() float64 { + return t.histogram.StdDev() +} + +// Sum returns the sum in the sample. +func (t *StandardTimer) Sum() int64 { + return t.histogram.Sum() +} + +// Record the duration of the execution of the given function. 
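+// For example (a sketch; "db.query" and doQuery are placeholders):
+//
+//	t := GetOrRegisterTimer("db.query", nil)
+//	t.Time(func() { doQuery() })
+//
+// which is equivalent to measuring time.Since yourself and calling Update.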
+func (t *StandardTimer) Time(f func()) { + ts := time.Now() + f() + t.Update(time.Since(ts)) +} + +// Record the duration of an event. +func (t *StandardTimer) Update(d time.Duration) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(d)) + t.meter.Mark(1) +} + +// Record the duration of an event that started at a time and ends now. +func (t *StandardTimer) UpdateSince(ts time.Time) { + t.mutex.Lock() + defer t.mutex.Unlock() + t.histogram.Update(int64(time.Since(ts))) + t.meter.Mark(1) +} + +// Variance returns the variance of the values in the sample. +func (t *StandardTimer) Variance() float64 { + return t.histogram.Variance() +} + +// TimerSnapshot is a read-only copy of another Timer. +type TimerSnapshot struct { + histogram *HistogramSnapshot + meter *MeterSnapshot +} + +// Count returns the number of events recorded at the time the snapshot was +// taken. +func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() } + +// Max returns the maximum value at the time the snapshot was taken. +func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() } + +// Mean returns the mean value at the time the snapshot was taken. +func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() } + +// Min returns the minimum value at the time the snapshot was taken. +func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() } + +// Percentile returns an arbitrary percentile of sampled values at the time the +// snapshot was taken. +func (t *TimerSnapshot) Percentile(p float64) float64 { + return t.histogram.Percentile(p) +} + +// Percentiles returns a slice of arbitrary percentiles of sampled values at +// the time the snapshot was taken. +func (t *TimerSnapshot) Percentiles(ps []float64) []float64 { + return t.histogram.Percentiles(ps) +} + +// Rate1 returns the one-minute moving average rate of events per second at the +// time the snapshot was taken. +func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() } + +// Rate5 returns the five-minute moving average rate of events per second at +// the time the snapshot was taken. +func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() } + +// Rate15 returns the fifteen-minute moving average rate of events per second +// at the time the snapshot was taken. +func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() } + +// RateMean returns the meter's mean rate of events per second at the time the +// snapshot was taken. +func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() } + +// Snapshot returns the snapshot. +func (t *TimerSnapshot) Snapshot() Timer { return t } + +// StdDev returns the standard deviation of the values at the time the snapshot +// was taken. +func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() } + +// Sum returns the sum at the time the snapshot was taken. +func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() } + +// Time panics. +func (*TimerSnapshot) Time(func()) { + panic("Time called on a TimerSnapshot") +} + +// Update panics. +func (*TimerSnapshot) Update(time.Duration) { + panic("Update called on a TimerSnapshot") +} + +// UpdateSince panics. +func (*TimerSnapshot) UpdateSince(time.Time) { + panic("UpdateSince called on a TimerSnapshot") +} + +// Variance returns the variance of the values at the time the snapshot was +// taken. 
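+// As with SampleVariance, this is the population variance,
+// sum((x-mean)^2)/n, not the (n-1)-denominator estimator.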
+func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
new file mode 100644
index 0000000..091e971
--- /dev/null
+++ b/vendor/github.com/rcrowley/go-metrics/writer.go
@@ -0,0 +1,100 @@
+package metrics
+
+import (
+	"fmt"
+	"io"
+	"sort"
+	"time"
+)
+
+// Write sorts and writes each metric in the given registry periodically to
+// the given io.Writer.
+func Write(r Registry, d time.Duration, w io.Writer) {
+	for _ = range time.Tick(d) {
+		WriteOnce(r, w)
+	}
+}
+
+// WriteOnce sorts and writes metrics in the given registry to the given
+// io.Writer.
+func WriteOnce(r Registry, w io.Writer) {
+	var namedMetrics namedMetricSlice
+	r.Each(func(name string, i interface{}) {
+		namedMetrics = append(namedMetrics, namedMetric{name, i})
+	})
+
+	sort.Sort(namedMetrics)
+	for _, namedMetric := range namedMetrics {
+		switch metric := namedMetric.m.(type) {
+		case Counter:
+			fmt.Fprintf(w, "counter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", metric.Count())
+		case Gauge:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %9d\n", metric.Value())
+		case GaugeFloat64:
+			fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  value:       %f\n", metric.Value())
+		case Healthcheck:
+			metric.Check()
+			fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  error:       %v\n", metric.Error())
+		case Histogram:
+			h := metric.Snapshot()
+			ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", h.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", h.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", h.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", h.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", h.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+		case Meter:
+			m := metric.Snapshot()
+			fmt.Fprintf(w, "meter %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", m.Count())
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", m.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", m.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", m.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", m.RateMean())
+		case Timer:
+			t := metric.Snapshot()
+			ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
+			fmt.Fprintf(w, "timer %s\n", namedMetric.name)
+			fmt.Fprintf(w, "  count:       %9d\n", t.Count())
+			fmt.Fprintf(w, "  min:         %9d\n", t.Min())
+			fmt.Fprintf(w, "  max:         %9d\n", t.Max())
+			fmt.Fprintf(w, "  mean:        %12.2f\n", t.Mean())
+			fmt.Fprintf(w, "  stddev:      %12.2f\n", t.StdDev())
+			fmt.Fprintf(w, "  median:      %12.2f\n", ps[0])
+			fmt.Fprintf(w, "  75%%:         %12.2f\n", ps[1])
+			fmt.Fprintf(w, "  95%%:         %12.2f\n", ps[2])
+			fmt.Fprintf(w, "  99%%:         %12.2f\n", ps[3])
+			fmt.Fprintf(w, "  99.9%%:       %12.2f\n", ps[4])
+			fmt.Fprintf(w, "  1-min rate:  %12.2f\n", t.Rate1())
+			fmt.Fprintf(w, "  5-min rate:  %12.2f\n", t.Rate5())
+			fmt.Fprintf(w, "  15-min rate: %12.2f\n", t.Rate15())
+			fmt.Fprintf(w, "  mean rate:   %12.2f\n", t.RateMean())
+		}
+	}
+}
+
+type namedMetric struct {
+	name string
+	m    interface{}
+}
+
+// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
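+// It exists so that WriteOnce can emit metrics in a stable, name-sorted
+// order. For a one-shot dump of everything registered (sketch):
+//
+//	WriteOnce(DefaultRegistry, os.Stdout)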
+type namedMetricSlice []namedMetric + +func (nms namedMetricSlice) Len() int { return len(nms) } + +func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] } + +func (nms namedMetricSlice) Less(i, j int) bool { + return nms[i].name < nms[j].name +} diff --git a/vendor/github.com/robfig/cron/constantdelay.go b/vendor/github.com/robfig/cron/constantdelay.go new file mode 100644 index 0000000..cd6e7b1 --- /dev/null +++ b/vendor/github.com/robfig/cron/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. +type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). +// Any fields less than a Second are truncated. +func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. +// This rounds so that the next activation time will be on the second. +func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/vendor/github.com/robfig/cron/cron.go b/vendor/github.com/robfig/cron/cron.go new file mode 100644 index 0000000..2318aeb --- /dev/null +++ b/vendor/github.com/robfig/cron/cron.go @@ -0,0 +1,259 @@ +package cron + +import ( + "log" + "runtime" + "sort" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + stop chan struct{} + add chan *Entry + snapshot chan []*Entry + running bool + ErrorLog *log.Logger + location *time.Location +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// The Schedule describes a job's duty cycle. +type Schedule interface { + // Return the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// Entry consists of a schedule and the func to execute on that schedule. +type Entry struct { + // The schedule on which this job should be run. + Schedule Schedule + + // The next time the job will run. This is the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // The last time this job was run. This is the zero time if the job has never + // been run. + Prev time.Time + + // The Job to run. + Job Job +} + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) + if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, in the Local time zone. 
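+//
+// A minimal use, expanded on in doc.go (the spec and job body here are
+// illustrative):
+//
+//	c := New()
+//	c.AddFunc("@every 1h", func() { fmt.Println("tick") })
+//	c.Start()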
+func New() *Cron { + return NewWithLocation(time.Now().Location()) +} + +// NewWithLocation returns a new Cron job runner. +func NewWithLocation(location *time.Location) *Cron { + return &Cron{ + entries: nil, + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan []*Entry), + running: false, + ErrorLog: nil, + location: location, + } +} + +// A wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +func (c *Cron) AddFunc(spec string, cmd func()) error { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. +func (c *Cron) AddJob(spec string, cmd Job) error { + schedule, err := Parse(spec) + if err != nil { + return err + } + c.Schedule(schedule, cmd) + return nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +func (c *Cron) Schedule(schedule Schedule, cmd Job) { + entry := &Entry{ + Schedule: schedule, + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + return + } + + c.add <- entry +} + +// Entries returns a snapshot of the cron entries. +func (c *Cron) Entries() []*Entry { + if c.running { + c.snapshot <- nil + x := <-c.snapshot + return x + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Start the cron scheduler in its own go-routine, or no-op if already started. +func (c *Cron) Start() { + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. +func (c *Cron) Run() { + if c.running { + return + } + c.running = true + c.run() +} + +func (c *Cron) runWithRecovery(j Job) { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + c.logf("cron: panic running job: %v\n%s", r, buf) + } + }() + j.Run() +} + +// Run the scheduler. this is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + // Figure out the next activation times for each entry. + now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. + timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + go c.runWithRecovery(e.Job) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + + case <-c.snapshot: + c.snapshot <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + return + } + + break + } + } +} + +// Logs an error to stderr or to the configured error log +func (c *Cron) logf(format string, args ...interface{}) { + if c.ErrorLog != nil { + c.ErrorLog.Printf(format, args...) + } else { + log.Printf(format, args...) 
+ } +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. +func (c *Cron) Stop() { + if !c.running { + return + } + c.stop <- struct{}{} + c.running = false +} + +// entrySnapshot returns a copy of the current cron entry list. +func (c *Cron) entrySnapshot() []*Entry { + entries := []*Entry{} + for _, e := range c.entries { + entries = append(entries, &Entry{ + Schedule: e.Schedule, + Next: e.Next, + Prev: e.Prev, + Job: e.Job, + }) + } + return entries +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} diff --git a/vendor/github.com/robfig/cron/doc.go b/vendor/github.com/robfig/cron/doc.go new file mode 100644 index 0000000..3700cf6 --- /dev/null +++ b/vendor/github.com/robfig/cron/doc.go @@ -0,0 +1,129 @@ +/* +Package cron implements a cron spec parser and job runner. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. + + c := cron.New() + c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") }) + c.AddFunc("@hourly", func() { fmt.Println("Every hour") }) + c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") }) + c.Start() + .. + // Funcs are invoked in their own goroutine, asynchronously. + ... + // Funcs may also be added to a running Cron + c.AddFunc("@daily", func() { fmt.Println("Every day") }) + .. + // Inspect the cron job entries' next and previous run times. + inspect(c.Entries()) + .. + c.Stop() // Stop the scheduler (does not stop any jobs already running). + +CRON Expression Format + +A cron expression represents a set of times, using 6 space-separated fields. + + Field name | Mandatory? | Allowed values | Allowed special characters + ---------- | ---------- | -------------- | -------------------------- + Seconds | Yes | 0-59 | * / , - + Minutes | Yes | 0-59 | * / , - + Hours | Yes | 0-23 | * / , - + Day of month | Yes | 1-31 | * / , - ? + Month | Yes | 1-12 or JAN-DEC | * / , - + Day of week | Yes | 0-6 or SUN-SAT | * / , - ? + +Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun", +and "sun" are equally accepted. + +Special Characters + +Asterisk ( * ) + +The asterisk indicates that the cron expression will match for all values of the +field; e.g., using an asterisk in the 5th field (month) would indicate every +month. + +Slash ( / ) + +Slashes are used to describe increments of ranges. For example 3-59/15 in the +1st field (minutes) would indicate the 3rd minute of the hour and every 15 +minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", +that is, an increment over the largest possible range of the field. The form +"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the +increment until the end of that specific range. It does not wrap around. + +Comma ( , ) + +Commas are used to separate items of a list. For example, using "MON,WED,FRI" in +the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. + +Hyphen ( - ) + +Hyphens are used to define ranges. For example, 9-17 would indicate every +hour between 9am and 5pm inclusive. + +Question mark ( ? ) + +Question mark may be used instead of '*' for leaving either day-of-month or +day-of-week blank. + +Predefined schedules + +You may use one of several pre-defined schedules in place of a cron expression. + + Entry | Description | Equivalent To + ----- | ----------- | ------------- + @yearly (or @annually) | Run once a year, midnight, Jan. 
1st | 0 0 0 1 1 *
+    @monthly               | Run once a month, midnight, first of month | 0 0 0 1 * *
+    @weekly                | Run once a week, midnight on Sunday        | 0 0 0 * * 0
+    @daily (or @midnight)  | Run once a day, midnight                   | 0 0 0 * * *
+    @hourly                | Run once an hour, beginning of hour        | 0 0 * * * *
+
+Intervals
+
+You may also schedule a job to execute at fixed intervals, starting at the time it's added
+or cron is run. This is supported by formatting the cron spec like this:
+
+    @every <duration>
+
+where "duration" is a string accepted by time.ParseDuration
+(http://golang.org/pkg/time/#ParseDuration).
+
+For example, "@every 1h30m10s" would indicate a schedule that activates immediately,
+and then every 1 hour, 30 minutes, 10 seconds.
+
+Note: The interval does not take the job runtime into account. For example,
+if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
+it will have only 2 minutes of idle time between each run.
+
+Time zones
+
+All interpretation and scheduling is done in the machine's local time zone (as
+provided by the Go time package (http://www.golang.org/pkg/time)).
+
+Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
+not be run!
+
+Thread safety
+
+Since the Cron service runs concurrently with the calling code, some amount of
+care must be taken to ensure proper synchronization.
+
+All cron methods are designed to be correctly synchronized as long as the caller
+ensures that invocations have a clear happens-before ordering between them.
+
+Implementation
+
+Cron entries are stored in an array, sorted by their next activation time. Cron
+sleeps until the next job is due to be run.
+
+Upon waking:
+ - it runs each entry that is active on that second
+ - it calculates the next run times for the jobs that were run
+ - it re-sorts the array of entries by next activation time.
+ - it goes to sleep until the soonest job.
+*/
+package cron
diff --git a/vendor/github.com/robfig/cron/parser.go b/vendor/github.com/robfig/cron/parser.go
new file mode 100644
index 0000000..a5e83c0
--- /dev/null
+++ b/vendor/github.com/robfig/cron/parser.go
@@ -0,0 +1,380 @@
+package cron
+
+import (
+	"fmt"
+	"math"
+	"strconv"
+	"strings"
+	"time"
+)
+
+// Configuration options for creating a parser. Most options specify which
+// fields should be included, while others enable features. If a field is not
+// included the parser will assume a default value. These options do not change
+// the order fields are parsed in.
type ParseOption int
+
+const (
+	Second      ParseOption = 1 << iota // Seconds field, default 0
+	Minute                              // Minutes field, default 0
+	Hour                                // Hours field, default 0
+	Dom                                 // Day of month field, default *
+	Month                               // Month field, default *
+	Dow                                 // Day of week field, default *
+	DowOptional                         // Optional day of week field, default *
+	Descriptor                          // Allow descriptors such as @monthly, @weekly, etc.
+)
+
+var places = []ParseOption{
+	Second,
+	Minute,
+	Hour,
+	Dom,
+	Month,
+	Dow,
+}
+
+var defaults = []string{
+	"0",
+	"0",
+	"0",
+	"*",
+	"*",
+	"*",
+}
+
+// A custom Parser that can be configured.
+type Parser struct {
+	options   ParseOption
+	optionals int
+}
+
+// NewParser creates a custom Parser with custom options.
+//
+//	// Standard parser without descriptors
+//	specParser := NewParser(Minute | Hour | Dom | Month | Dow)
+//	sched, err := specParser.Parse("0 0 15 */3 *")
+//
+//	// Same as above, just excludes time fields
+//	subsParser := NewParser(Dom | Month | Dow)
+//	sched, err := subsParser.Parse("15 */3 *")
+//
+//	// Same as above, just makes Dow optional
+//	subsParser := NewParser(Dom | Month | DowOptional)
+//	sched, err := subsParser.Parse("15 */3")
+//
+func NewParser(options ParseOption) Parser {
+	optionals := 0
+	if options&DowOptional > 0 {
+		options |= Dow
+		optionals++
+	}
+	return Parser{options, optionals}
+}
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+// It accepts crontab specs and features configured by NewParser.
+func (p Parser) Parse(spec string) (Schedule, error) {
+	if len(spec) == 0 {
+		return nil, fmt.Errorf("Empty spec string")
+	}
+	if spec[0] == '@' && p.options&Descriptor > 0 {
+		return parseDescriptor(spec)
+	}
+
+	// Figure out how many fields we need
+	max := 0
+	for _, place := range places {
+		if p.options&place > 0 {
+			max++
+		}
+	}
+	min := max - p.optionals
+
+	// Split fields on whitespace
+	fields := strings.Fields(spec)
+
+	// Validate number of fields
+	if count := len(fields); count < min || count > max {
+		if min == max {
+			return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec)
+		}
+		return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec)
+	}
+
+	// Fill in missing fields
+	fields = expandFields(fields, p.options)
+
+	var err error
+	field := func(field string, r bounds) uint64 {
+		if err != nil {
+			return 0
+		}
+		var bits uint64
+		bits, err = getField(field, r)
+		return bits
+	}
+
+	var (
+		second     = field(fields[0], seconds)
+		minute     = field(fields[1], minutes)
+		hour       = field(fields[2], hours)
+		dayofmonth = field(fields[3], dom)
+		month      = field(fields[4], months)
+		dayofweek  = field(fields[5], dow)
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return &SpecSchedule{
+		Second: second,
+		Minute: minute,
+		Hour:   hour,
+		Dom:    dayofmonth,
+		Month:  month,
+		Dow:    dayofweek,
+	}, nil
+}
+
+func expandFields(fields []string, options ParseOption) []string {
+	n := 0
+	count := len(fields)
+	expFields := make([]string, len(places))
+	copy(expFields, defaults)
+	for i, place := range places {
+		if options&place > 0 {
+			expFields[i] = fields[n]
+			n++
+		}
+		if n == count {
+			break
+		}
+	}
+	return expFields
+}
+
+var standardParser = NewParser(
+	Minute | Hour | Dom | Month | Dow | Descriptor,
+)
+
+// ParseStandard returns a new crontab schedule representing the given standardSpec
+// (https://en.wikipedia.org/wiki/Cron). It differs from Parse in that it requires
+// the spec to always contain 5 entries, representing: minute, hour, day of month,
+// month and day of week, in that order. It returns a descriptive error if the spec
+// is not valid.
+//
+// It accepts
+//   - Standard crontab specs, e.g. "* * * * ?"
+//   - Descriptors, e.g. "@midnight", "@every 1h30m"
+func ParseStandard(standardSpec string) (Schedule, error) {
+	return standardParser.Parse(standardSpec)
+}
+
+var defaultParser = NewParser(
+	Second | Minute | Hour | Dom | Month | DowOptional | Descriptor,
+)
+
+// Parse returns a new crontab schedule representing the given spec.
+// It returns a descriptive error if the spec is not valid.
+//
+// It accepts
+//   - Full crontab specs, e.g. "* * * * * ?"
+//   - Descriptors, e.g.
"@midnight", "@every 1h30m" +func Parse(spec string) (Schedule, error) { + return defaultParser.Parse(spec) +} + +// getField returns an Int with the bits set representing all of the times that +// the field represents or error parsing field value. A "field" is a comma-separated +// list of "ranges". +func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. +func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" { + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("Too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". + if singleDigit { + end = r.max + } + default: + return 0, fmt.Errorf("Too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("Step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. +func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. +func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. 
+func parseDescriptor(descriptor string) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor) +} diff --git a/vendor/github.com/robfig/cron/spec.go b/vendor/github.com/robfig/cron/spec.go new file mode 100644 index 0000000..aac9a60 --- /dev/null +++ b/vendor/github.com/robfig/cron/spec.go @@ -0,0 +1,158 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. +type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach: + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. + // If the field doesn't match the schedule, then increment the field until it matches. + // While incrementing the field, a wrap-around brings it back to the beginning + // of the field list (since it is necessary to re-verify previous field + // values) + + // Start at the earliest possible time (the upcoming second). + t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // This flag indicates whether a field has been incremented. + added := false + + // If no time is found within five years, return zero. 
+	yearLimit := t.Year() + 5
+
+WRAP:
+	if t.Year() > yearLimit {
+		return time.Time{}
+	}
+
+	// Find the first applicable month.
+	// If it's this month, then do nothing.
+	for 1<<uint(t.Month())&s.Month == 0 {
+		// If we have to add a month, reset the other parts to 0.
+		if !added {
+			added = true
+			// Otherwise, set the date at the beginning (since the current time is irrelevant).
+			t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 1, 0)
+
+		// Wrapped around.
+		if t.Month() == time.January {
+			goto WRAP
+		}
+	}
+
+	// Now get a day in that month.
+	for !dayMatches(s, t) {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
+		}
+		t = t.AddDate(0, 0, 1)
+
+		if t.Day() == 1 {
+			goto WRAP
+		}
+	}
+
+	// Find the first applicable hour.
+	for 1<<uint(t.Hour())&s.Hour == 0 {
+		if !added {
+			added = true
+			t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
+		}
+		t = t.Add(1 * time.Hour)
+
+		if t.Hour() == 0 {
+			goto WRAP
+		}
+	}
+
+	// Find the first applicable minute.
+	for 1<<uint(t.Minute())&s.Minute == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Minute)
+		}
+		t = t.Add(1 * time.Minute)
+
+		if t.Minute() == 0 {
+			goto WRAP
+		}
+	}
+
+	// Find the first applicable second.
+	for 1<<uint(t.Second())&s.Second == 0 {
+		if !added {
+			added = true
+			t = t.Truncate(time.Second)
+		}
+		t = t.Add(1 * time.Second)
+
+		if t.Second() == 0 {
+			goto WRAP
+		}
+	}
+
+	return t
+}
+
+// dayMatches returns true if the schedule's day-of-week and day-of-month
+// restrictions are satisfied by the given time.
+func dayMatches(s *SpecSchedule, t time.Time) bool {
+	var (
+		domMatch bool = 1<<uint(t.Day())&s.Dom > 0
+		dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
+	)
+	if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
+		return domMatch && dowMatch
+	}
+	return domMatch || dowMatch
+}
diff --git a/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
new file mode 100644
index 0000000..1e1dff5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/nacl/secretbox/secretbox.go
@@ -0,0 +1,149 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package secretbox encrypts and authenticates small messages.
+
+Secretbox uses XSalsa20 and Poly1305 to encrypt and authenticate messages with
+secret-key cryptography. The length of messages is not hidden.
+
+It is the caller's responsibility to ensure the uniqueness of nonces—for
+example, by using nonce 1 for the first message, nonce 2 for the second
+message, etc. Nonces are long enough that randomly generated nonces have
+negligible risk of collision.
+
+This package is interoperable with NaCl: https://nacl.cr.yp.to/secretbox.html.
+*/
+package secretbox // import "golang.org/x/crypto/nacl/secretbox"
+
+import (
+	"golang.org/x/crypto/poly1305"
+	"golang.org/x/crypto/salsa20/salsa"
+)
+
+// Overhead is the number of bytes of overhead when boxing a message.
+const Overhead = poly1305.TagSize
+
+// setup produces a sub-key and Salsa20 counter given a nonce and key.
+func setup(subKey *[32]byte, counter *[16]byte, nonce *[24]byte, key *[32]byte) {
+	// We use XSalsa20 for encryption so first we need to generate a
+	// key and nonce with HSalsa20.
+	var hNonce [16]byte
+	copy(hNonce[:], nonce[:])
+	salsa.HSalsa20(subKey, &hNonce, key, &salsa.Sigma)
+
+	// The final 8 bytes of the original nonce form the new nonce.
+	copy(counter[:], nonce[16:])
+}
+
+// sliceForAppend takes a slice and a requested number of bytes. It returns a
+// slice with the contents of the given slice followed by that many bytes and a
+// second slice that aliases into it and contains only the extra bytes. If the
+// original slice has sufficient capacity then no allocation is performed.
+func sliceForAppend(in []byte, n int) (head, tail []byte) {
+	if total := len(in) + n; cap(in) >= total {
+		head = in[:total]
+	} else {
+		head = make([]byte, total)
+		copy(head, in)
+	}
+	tail = head[len(in):]
+	return
+}
+
+// Seal appends an encrypted and authenticated copy of message to out, which
+// must not overlap message. The key and nonce pair must be unique for each
+// distinct message and the output will be Overhead bytes longer than message.
+func Seal(out, message []byte, nonce *[24]byte, key *[32]byte) []byte {
+	var subKey [32]byte
+	var counter [16]byte
+	setup(&subKey, &counter, nonce, key)
+
+	// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
+	// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
+	// keystream as a side effect.
+	var firstBlock [64]byte
+	salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
+
+	var poly1305Key [32]byte
+	copy(poly1305Key[:], firstBlock[:])
+
+	ret, out := sliceForAppend(out, len(message)+poly1305.TagSize)
+
+	// We XOR up to 32 bytes of message with the keystream generated from
+	// the first block.
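+	// (The first 32 bytes of that keystream became the Poly1305 key above,
+	// so the message keystream begins at firstBlock[32].)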
+	firstMessageBlock := message
+	if len(firstMessageBlock) > 32 {
+		firstMessageBlock = firstMessageBlock[:32]
+	}
+
+	tagOut := out
+	out = out[poly1305.TagSize:]
+	for i, x := range firstMessageBlock {
+		out[i] = firstBlock[32+i] ^ x
+	}
+	message = message[len(firstMessageBlock):]
+	ciphertext := out
+	out = out[len(firstMessageBlock):]
+
+	// Now encrypt the rest.
+	counter[8] = 1
+	salsa.XORKeyStream(out, message, &counter, &subKey)
+
+	var tag [poly1305.TagSize]byte
+	poly1305.Sum(&tag, ciphertext, &poly1305Key)
+	copy(tagOut, tag[:])
+
+	return ret
+}
+
+// Open authenticates and decrypts a box produced by Seal and appends the
+// message to out, which must not overlap box. The output will be Overhead
+// bytes smaller than box.
+func Open(out []byte, box []byte, nonce *[24]byte, key *[32]byte) ([]byte, bool) {
+	if len(box) < Overhead {
+		return nil, false
+	}
+
+	var subKey [32]byte
+	var counter [16]byte
+	setup(&subKey, &counter, nonce, key)
+
+	// The Poly1305 key is generated by encrypting 32 bytes of zeros. Since
+	// Salsa20 works with 64-byte blocks, we also generate 32 bytes of
+	// keystream as a side effect.
+	var firstBlock [64]byte
+	salsa.XORKeyStream(firstBlock[:], firstBlock[:], &counter, &subKey)
+
+	var poly1305Key [32]byte
+	copy(poly1305Key[:], firstBlock[:])
+	var tag [poly1305.TagSize]byte
+	copy(tag[:], box)
+
+	if !poly1305.Verify(&tag, box[poly1305.TagSize:], &poly1305Key) {
+		return nil, false
+	}
+
+	ret, out := sliceForAppend(out, len(box)-Overhead)
+
+	// We XOR up to 32 bytes of box with the keystream generated from
+	// the first block.
+	box = box[Overhead:]
+	firstMessageBlock := box
+	if len(firstMessageBlock) > 32 {
+		firstMessageBlock = firstMessageBlock[:32]
+	}
+	for i, x := range firstMessageBlock {
+		out[i] = firstBlock[32+i] ^ x
+	}
+
+	box = box[len(firstMessageBlock):]
+	out = out[len(firstMessageBlock):]
+
+	// Now decrypt the rest.
+	counter[8] = 1
+	salsa.XORKeyStream(out, box, &counter, &subKey)
+
+	return ret, true
+}
diff --git a/vendor/golang.org/x/crypto/poly1305/poly1305.go b/vendor/golang.org/x/crypto/poly1305/poly1305.go
new file mode 100644
index 0000000..f562fa5
--- /dev/null
+++ b/vendor/golang.org/x/crypto/poly1305/poly1305.go
@@ -0,0 +1,33 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package poly1305 implements Poly1305 one-time message authentication code as
+specified in https://cr.yp.to/mac/poly1305-20050329.pdf.
+
+Poly1305 is a fast, one-time authentication function. It is infeasible for an
+attacker to generate an authenticator for a message without the key. However, a
+key must only be used for a single message. Authenticating two different
+messages with the same key allows an attacker to forge authenticators for other
+messages with the same key.
+
+Poly1305 was originally coupled with AES in order to make Poly1305-AES. AES was
+used with a fixed key in order to generate one-time keys from a nonce.
+However, in this package AES isn't used and the one-time key is specified
+directly.
+*/
+package poly1305 // import "golang.org/x/crypto/poly1305"
+
+import "crypto/subtle"
+
+// TagSize is the size, in bytes, of a poly1305 authenticator.
+const TagSize = 16
+
+// Verify returns true if mac is a valid authenticator for m with the given
+// key.
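+// The comparison runs in constant time via crypto/subtle, so Verify does not
+// leak the position of a mismatch.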
+func Verify(mac *[16]byte, m []byte, key *[32]byte) bool { + var tmp [16]byte + Sum(&tmp, m, key) + return subtle.ConstantTimeCompare(tmp[:], mac[:]) == 1 +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go new file mode 100644 index 0000000..4dd72fe --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.go @@ -0,0 +1,22 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +package poly1305 + +// This function is implemented in sum_amd64.s +//go:noescape +func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]byte) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + poly1305(out, mPtr, uint64(len(m)), key) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_amd64.s b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s new file mode 100644 index 0000000..2edae63 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_amd64.s @@ -0,0 +1,125 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +#define POLY1305_ADD(msg, h0, h1, h2) \ + ADDQ 0(msg), h0; \ + ADCQ 8(msg), h1; \ + ADCQ $1, h2; \ + LEAQ 16(msg), msg + +#define POLY1305_MUL(h0, h1, h2, r0, r1, t0, t1, t2, t3) \ + MOVQ r0, AX; \ + MULQ h0; \ + MOVQ AX, t0; \ + MOVQ DX, t1; \ + MOVQ r0, AX; \ + MULQ h1; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ r0, t2; \ + IMULQ h2, t2; \ + ADDQ DX, t2; \ + \ + MOVQ r1, AX; \ + MULQ h0; \ + ADDQ AX, t1; \ + ADCQ $0, DX; \ + MOVQ DX, h0; \ + MOVQ r1, t3; \ + IMULQ h2, t3; \ + MOVQ r1, AX; \ + MULQ h1; \ + ADDQ AX, t2; \ + ADCQ DX, t3; \ + ADDQ h0, t2; \ + ADCQ $0, t3; \ + \ + MOVQ t0, h0; \ + MOVQ t1, h1; \ + MOVQ t2, h2; \ + ANDQ $3, h2; \ + MOVQ t2, t0; \ + ANDQ $0xFFFFFFFFFFFFFFFC, t0; \ + ADDQ t0, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2; \ + SHRQ $2, t3, t2; \ + SHRQ $2, t3; \ + ADDQ t2, h0; \ + ADCQ t3, h1; \ + ADCQ $0, h2 + +DATA ·poly1305Mask<>+0x00(SB)/8, $0x0FFFFFFC0FFFFFFF +DATA ·poly1305Mask<>+0x08(SB)/8, $0x0FFFFFFC0FFFFFFC +GLOBL ·poly1305Mask<>(SB), RODATA, $16 + +// func poly1305(out *[16]byte, m *byte, mlen uint64, key *[32]key) +TEXT ·poly1305(SB), $0-32 + MOVQ out+0(FP), DI + MOVQ m+8(FP), SI + MOVQ mlen+16(FP), R15 + MOVQ key+24(FP), AX + + MOVQ 0(AX), R11 + MOVQ 8(AX), R12 + ANDQ ·poly1305Mask<>(SB), R11 // r0 + ANDQ ·poly1305Mask<>+8(SB), R12 // r1 + XORQ R8, R8 // h0 + XORQ R9, R9 // h1 + XORQ R10, R10 // h2 + + CMPQ R15, $16 + JB bytes_between_0_and_15 + +loop: + POLY1305_ADD(SI, R8, R9, R10) + +multiply: + POLY1305_MUL(R8, R9, R10, R11, R12, BX, CX, R13, R14) + SUBQ $16, R15 + CMPQ R15, $16 + JAE loop + +bytes_between_0_and_15: + TESTQ R15, R15 + JZ done + MOVQ $1, BX + XORQ CX, CX + XORQ R13, R13 + ADDQ R15, SI + +flush_buffer: + SHLQ $8, BX, CX + SHLQ $8, BX + MOVB -1(SI), R13 + XORQ R13, BX + DECQ SI + DECQ R15 + JNZ flush_buffer + + ADDQ BX, R8 + ADCQ CX, R9 + ADCQ $0, R10 + MOVQ $16, R15 + JMP multiply + +done: + MOVQ R8, AX + MOVQ R9, BX + SUBQ $0xFFFFFFFFFFFFFFFB, AX + SBBQ $0xFFFFFFFFFFFFFFFF, BX + SBBQ 
$3, R10 + CMOVQCS R8, AX + CMOVQCS R9, BX + MOVQ key+24(FP), R8 + ADDQ 16(R8), AX + ADCQ 24(R8), BX + + MOVQ AX, 0(DI) + MOVQ BX, 8(DI) + RET diff --git a/vendor/golang.org/x/crypto/poly1305/sum_arm.go b/vendor/golang.org/x/crypto/poly1305/sum_arm.go new file mode 100644 index 0000000..5dc321c --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_arm.go @@ -0,0 +1,22 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build arm,!gccgo,!appengine,!nacl + +package poly1305 + +// This function is implemented in sum_arm.s +//go:noescape +func poly1305_auth_armv6(out *[16]byte, m *byte, mlen uint32, key *[32]byte) + +// Sum generates an authenticator for m using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. +func Sum(out *[16]byte, m []byte, key *[32]byte) { + var mPtr *byte + if len(m) > 0 { + mPtr = &m[0] + } + poly1305_auth_armv6(out, mPtr, uint32(len(m)), key) +} diff --git a/vendor/golang.org/x/crypto/poly1305/sum_ref.go b/vendor/golang.org/x/crypto/poly1305/sum_ref.go new file mode 100644 index 0000000..b2805a5 --- /dev/null +++ b/vendor/golang.org/x/crypto/poly1305/sum_ref.go @@ -0,0 +1,141 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64,!arm gccgo appengine nacl + +package poly1305 + +import "encoding/binary" + +// Sum generates an authenticator for msg using a one-time key and puts the +// 16-byte result into out. Authenticating two different messages with the same +// key allows an attacker to forge messages at will. 
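+//
+// This portable fallback works in radix 2^26: the accumulator and the r part
+// of the key are held in five 26-bit limbs (the 0x3ffffff masks below), so
+// every limb product in the h *= r step fits in a uint64 without overflow.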
+func Sum(out *[TagSize]byte, msg []byte, key *[32]byte) { + var ( + h0, h1, h2, h3, h4 uint32 // the hash accumulators + r0, r1, r2, r3, r4 uint64 // the r part of the key + ) + + r0 = uint64(binary.LittleEndian.Uint32(key[0:]) & 0x3ffffff) + r1 = uint64((binary.LittleEndian.Uint32(key[3:]) >> 2) & 0x3ffff03) + r2 = uint64((binary.LittleEndian.Uint32(key[6:]) >> 4) & 0x3ffc0ff) + r3 = uint64((binary.LittleEndian.Uint32(key[9:]) >> 6) & 0x3f03fff) + r4 = uint64((binary.LittleEndian.Uint32(key[12:]) >> 8) & 0x00fffff) + + R1, R2, R3, R4 := r1*5, r2*5, r3*5, r4*5 + + for len(msg) >= TagSize { + // h += msg + h0 += binary.LittleEndian.Uint32(msg[0:]) & 0x3ffffff + h1 += (binary.LittleEndian.Uint32(msg[3:]) >> 2) & 0x3ffffff + h2 += (binary.LittleEndian.Uint32(msg[6:]) >> 4) & 0x3ffffff + h3 += (binary.LittleEndian.Uint32(msg[9:]) >> 6) & 0x3ffffff + h4 += (binary.LittleEndian.Uint32(msg[12:]) >> 8) | (1 << 24) + + // h *= r + d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) + d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) + d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) + d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) + d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) + + // h %= p + h0 = uint32(d0) & 0x3ffffff + h1 = uint32(d1) & 0x3ffffff + h2 = uint32(d2) & 0x3ffffff + h3 = uint32(d3) & 0x3ffffff + h4 = uint32(d4) & 0x3ffffff + + h0 += uint32(d4>>26) * 5 + h1 += h0 >> 26 + h0 = h0 & 0x3ffffff + + msg = msg[TagSize:] + } + + if len(msg) > 0 { + var block [TagSize]byte + off := copy(block[:], msg) + block[off] = 0x01 + + // h += msg + h0 += binary.LittleEndian.Uint32(block[0:]) & 0x3ffffff + h1 += (binary.LittleEndian.Uint32(block[3:]) >> 2) & 0x3ffffff + h2 += (binary.LittleEndian.Uint32(block[6:]) >> 4) & 0x3ffffff + h3 += (binary.LittleEndian.Uint32(block[9:]) >> 6) & 0x3ffffff + h4 += (binary.LittleEndian.Uint32(block[12:]) >> 8) + + // h *= r + d0 := (uint64(h0) * r0) + (uint64(h1) * R4) + (uint64(h2) * R3) + (uint64(h3) * R2) + (uint64(h4) * R1) + d1 := (d0 >> 26) + (uint64(h0) * r1) + (uint64(h1) * r0) + (uint64(h2) * R4) + (uint64(h3) * R3) + (uint64(h4) * R2) + d2 := (d1 >> 26) + (uint64(h0) * r2) + (uint64(h1) * r1) + (uint64(h2) * r0) + (uint64(h3) * R4) + (uint64(h4) * R3) + d3 := (d2 >> 26) + (uint64(h0) * r3) + (uint64(h1) * r2) + (uint64(h2) * r1) + (uint64(h3) * r0) + (uint64(h4) * R4) + d4 := (d3 >> 26) + (uint64(h0) * r4) + (uint64(h1) * r3) + (uint64(h2) * r2) + (uint64(h3) * r1) + (uint64(h4) * r0) + + // h %= p + h0 = uint32(d0) & 0x3ffffff + h1 = uint32(d1) & 0x3ffffff + h2 = uint32(d2) & 0x3ffffff + h3 = uint32(d3) & 0x3ffffff + h4 = uint32(d4) & 0x3ffffff + + h0 += uint32(d4>>26) * 5 + h1 += h0 >> 26 + h0 = h0 & 0x3ffffff + } + + // h %= p reduction + h2 += h1 >> 26 + h1 &= 0x3ffffff + h3 += h2 >> 26 + h2 &= 0x3ffffff + h4 += h3 >> 26 + h3 &= 0x3ffffff + h0 += 5 * (h4 >> 26) + h4 &= 0x3ffffff + h1 += h0 >> 26 + h0 &= 0x3ffffff + + // h - p + t0 := h0 + 5 + t1 := h1 + (t0 >> 26) + t2 := h2 + (t1 >> 26) + t3 := h3 + (t2 >> 26) + t4 := h4 + (t3 >> 26) - (1 << 26) + t0 &= 0x3ffffff + t1 &= 0x3ffffff + t2 &= 0x3ffffff + t3 &= 0x3ffffff + + // select h if h < p else h - p + t_mask := (t4 >> 31) - 1 + h_mask := ^t_mask + h0 = (h0 & h_mask) | (t0 & 
t_mask) + h1 = (h1 & h_mask) | (t1 & t_mask) + h2 = (h2 & h_mask) | (t2 & t_mask) + h3 = (h3 & h_mask) | (t3 & t_mask) + h4 = (h4 & h_mask) | (t4 & t_mask) + + // h %= 2^128 + h0 |= h1 << 26 + h1 = ((h1 >> 6) | (h2 << 20)) + h2 = ((h2 >> 12) | (h3 << 14)) + h3 = ((h3 >> 18) | (h4 << 8)) + + // s: the s part of the key + // tag = (h + s) % (2^128) + t := uint64(h0) + uint64(binary.LittleEndian.Uint32(key[16:])) + h0 = uint32(t) + t = uint64(h1) + uint64(binary.LittleEndian.Uint32(key[20:])) + (t >> 32) + h1 = uint32(t) + t = uint64(h2) + uint64(binary.LittleEndian.Uint32(key[24:])) + (t >> 32) + h2 = uint32(t) + t = uint64(h3) + uint64(binary.LittleEndian.Uint32(key[28:])) + (t >> 32) + h3 = uint32(t) + + binary.LittleEndian.PutUint32(out[0:], h0) + binary.LittleEndian.PutUint32(out[4:], h1) + binary.LittleEndian.PutUint32(out[8:], h2) + binary.LittleEndian.PutUint32(out[12:], h3) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go new file mode 100644 index 0000000..4c96147 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/hsalsa20.go @@ -0,0 +1,144 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package salsa provides low-level access to functions in the Salsa family. +package salsa // import "golang.org/x/crypto/salsa20/salsa" + +// Sigma is the Salsa20 constant for 256-bit keys. +var Sigma = [16]byte{'e', 'x', 'p', 'a', 'n', 'd', ' ', '3', '2', '-', 'b', 'y', 't', 'e', ' ', 'k'} + +// HSalsa20 applies the HSalsa20 core function to a 16-byte input in, 32-byte +// key k, and 16-byte constant c, and puts the result into the 32-byte array +// out. 
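+//
+// HSalsa20 is the nonce-extension step of XSalsa20: callers such as NaCl's
+// secretbox derive a one-time Salsa20 subkey by applying HSalsa20 to the
+// first 16 bytes of a 24-byte nonce under the long-term key.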
+func HSalsa20(out *[32]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + x0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + x1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + x2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + x3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + x4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + x5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + x6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + x7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + x8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + x9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + x10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + x11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + x12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + x13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + x14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + x15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + for i := 0; i < 20; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x5) + out[5] = byte(x5 >> 8) + out[6] = byte(x5 >> 16) + out[7] = byte(x5 >> 24) + + out[8] = byte(x10) + out[9] = byte(x10 >> 8) + out[10] = byte(x10 >> 16) + out[11] = byte(x10 >> 24) + + out[12] = byte(x15) + out[13] = byte(x15 >> 8) + out[14] = byte(x15 >> 16) + out[15] = byte(x15 >> 24) + + out[16] = byte(x6) + out[17] = byte(x6 >> 8) + out[18] = byte(x6 >> 16) + out[19] = byte(x6 >> 24) + + out[20] = byte(x7) + out[21] = byte(x7 >> 8) + out[22] = byte(x7 >> 16) + out[23] = byte(x7 >> 24) + + out[24] = byte(x8) + out[25] = byte(x8 >> 8) + out[26] = byte(x8 >> 16) + out[27] = byte(x8 >> 24) + + out[28] = byte(x9) + out[29] = byte(x9 >> 8) + out[30] = byte(x9 >> 16) + out[31] = byte(x9 >> 24) +} 
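Taken together, these files implement NaCl's secretbox: HSalsa20 stretches the 24-byte nonce into a Salsa20 subkey, Salsa20 generates the keystream, and the first 32 keystream bytes key the Poly1305 authenticator that Seal emits and Open verifies. A minimal round-trip sketch of the public API shown above, assuming the vendored package lives at golang.org/x/crypto/nacl/secretbox (its diff header falls outside this excerpt):

package main

import (
	"crypto/rand"
	"fmt"

	"golang.org/x/crypto/nacl/secretbox"
)

func main() {
	var key [32]byte
	var nonce [24]byte
	// The key must be a shared secret and a nonce must never repeat under
	// the same key; fresh random values serve for this demonstration.
	if _, err := rand.Read(key[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce[:]); err != nil {
		panic(err)
	}

	// Seal appends the 16-byte Poly1305 tag plus the ciphertext to its
	// first argument; passing nil allocates a fresh slice.
	box := secretbox.Seal(nil, []byte("hello, world"), &nonce, &key)

	// Open checks the tag before decrypting and reports tampering via ok,
	// just as the Open implementation above returns (nil, false).
	plain, ok := secretbox.Open(nil, box, &nonce, &key)
	fmt.Println(ok, string(plain))
}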
diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s b/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s new file mode 100644 index 0000000..22afbdc --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa2020_amd64.s @@ -0,0 +1,889 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +// This code was translated into a form compatible with 6a from the public +// domain sources in SUPERCOP: https://bench.cr.yp.to/supercop.html + +// func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) +// This needs up to 64 bytes at 360(SP); hence the non-obvious frame size. +TEXT ·salsa2020XORKeyStream(SB),0,$456-40 // frame = 424 + 32 byte alignment + MOVQ out+0(FP),DI + MOVQ in+8(FP),SI + MOVQ n+16(FP),DX + MOVQ nonce+24(FP),CX + MOVQ key+32(FP),R8 + + MOVQ SP,R12 + MOVQ SP,R9 + ADDQ $31, R9 + ANDQ $~31, R9 + MOVQ R9, SP + + MOVQ DX,R9 + MOVQ CX,DX + MOVQ R8,R10 + CMPQ R9,$0 + JBE DONE + START: + MOVL 20(R10),CX + MOVL 0(R10),R8 + MOVL 0(DX),AX + MOVL 16(R10),R11 + MOVL CX,0(SP) + MOVL R8, 4 (SP) + MOVL AX, 8 (SP) + MOVL R11, 12 (SP) + MOVL 8(DX),CX + MOVL 24(R10),R8 + MOVL 4(R10),AX + MOVL 4(DX),R11 + MOVL CX,16(SP) + MOVL R8, 20 (SP) + MOVL AX, 24 (SP) + MOVL R11, 28 (SP) + MOVL 12(DX),CX + MOVL 12(R10),DX + MOVL 28(R10),R8 + MOVL 8(R10),AX + MOVL DX,32(SP) + MOVL CX, 36 (SP) + MOVL R8, 40 (SP) + MOVL AX, 44 (SP) + MOVQ $1634760805,DX + MOVQ $857760878,CX + MOVQ $2036477234,R8 + MOVQ $1797285236,AX + MOVL DX,48(SP) + MOVL CX, 52 (SP) + MOVL R8, 56 (SP) + MOVL AX, 60 (SP) + CMPQ R9,$256 + JB BYTESBETWEEN1AND255 + MOVOA 48(SP),X0 + PSHUFL $0X55,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X3 + PSHUFL $0X00,X0,X0 + MOVOA X1,64(SP) + MOVOA X2,80(SP) + MOVOA X3,96(SP) + MOVOA X0,112(SP) + MOVOA 0(SP),X0 + PSHUFL $0XAA,X0,X1 + PSHUFL $0XFF,X0,X2 + PSHUFL $0X00,X0,X3 + PSHUFL $0X55,X0,X0 + MOVOA X1,128(SP) + MOVOA X2,144(SP) + MOVOA X3,160(SP) + MOVOA X0,176(SP) + MOVOA 16(SP),X0 + PSHUFL $0XFF,X0,X1 + PSHUFL $0X55,X0,X2 + PSHUFL $0XAA,X0,X0 + MOVOA X1,192(SP) + MOVOA X2,208(SP) + MOVOA X0,224(SP) + MOVOA 32(SP),X0 + PSHUFL $0X00,X0,X1 + PSHUFL $0XAA,X0,X2 + PSHUFL $0XFF,X0,X0 + MOVOA X1,240(SP) + MOVOA X2,256(SP) + MOVOA X0,272(SP) + BYTESATLEAST256: + MOVL 16(SP),DX + MOVL 36 (SP),CX + MOVL DX,288(SP) + MOVL CX,304(SP) + ADDQ $1,DX + SHLQ $32,CX + ADDQ CX,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 292 (SP) + MOVL CX, 308 (SP) + ADDQ $1,DX + SHLQ $32,CX + ADDQ CX,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 296 (SP) + MOVL CX, 312 (SP) + ADDQ $1,DX + SHLQ $32,CX + ADDQ CX,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX, 300 (SP) + MOVL CX, 316 (SP) + ADDQ $1,DX + SHLQ $32,CX + ADDQ CX,DX + MOVQ DX,CX + SHRQ $32,CX + MOVL DX,16(SP) + MOVL CX, 36 (SP) + MOVQ R9,352(SP) + MOVQ $20,DX + MOVOA 64(SP),X0 + MOVOA 80(SP),X1 + MOVOA 96(SP),X2 + MOVOA 256(SP),X3 + MOVOA 272(SP),X4 + MOVOA 128(SP),X5 + MOVOA 144(SP),X6 + MOVOA 176(SP),X7 + MOVOA 192(SP),X8 + MOVOA 208(SP),X9 + MOVOA 224(SP),X10 + MOVOA 304(SP),X11 + MOVOA 112(SP),X12 + MOVOA 160(SP),X13 + MOVOA 240(SP),X14 + MOVOA 288(SP),X15 + MAINLOOP1: + MOVOA X1,320(SP) + MOVOA X2,336(SP) + MOVOA X13,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X14 + PSRLL $25,X2 + PXOR X2,X14 + MOVOA X7,X1 + PADDL X0,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X11 + PSRLL $25,X2 + PXOR X2,X11 + MOVOA X12,X1 + PADDL X14,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X15 + PSRLL $23,X2 + 
PXOR X2,X15 + MOVOA X0,X1 + PADDL X11,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X9 + PSRLL $23,X2 + PXOR X2,X9 + MOVOA X14,X1 + PADDL X15,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X13 + PSRLL $19,X2 + PXOR X2,X13 + MOVOA X11,X1 + PADDL X9,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X7 + PSRLL $19,X2 + PXOR X2,X7 + MOVOA X15,X1 + PADDL X13,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA 320(SP),X1 + MOVOA X12,320(SP) + MOVOA X9,X2 + PADDL X7,X2 + MOVOA X2,X12 + PSLLL $18,X2 + PXOR X2,X0 + PSRLL $14,X12 + PXOR X12,X0 + MOVOA X5,X2 + PADDL X1,X2 + MOVOA X2,X12 + PSLLL $7,X2 + PXOR X2,X3 + PSRLL $25,X12 + PXOR X12,X3 + MOVOA 336(SP),X2 + MOVOA X0,336(SP) + MOVOA X6,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X4 + PSRLL $25,X12 + PXOR X12,X4 + MOVOA X1,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X10 + PSRLL $23,X12 + PXOR X12,X10 + MOVOA X2,X0 + PADDL X4,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X8 + PSRLL $23,X12 + PXOR X12,X8 + MOVOA X3,X0 + PADDL X10,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X5 + PSRLL $19,X12 + PXOR X12,X5 + MOVOA X4,X0 + PADDL X8,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X6 + PSRLL $19,X12 + PXOR X12,X6 + MOVOA X10,X0 + PADDL X5,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA 320(SP),X0 + MOVOA X1,320(SP) + MOVOA X4,X1 + PADDL X0,X1 + MOVOA X1,X12 + PSLLL $7,X1 + PXOR X1,X7 + PSRLL $25,X12 + PXOR X12,X7 + MOVOA X8,X1 + PADDL X6,X1 + MOVOA X1,X12 + PSLLL $18,X1 + PXOR X1,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 336(SP),X12 + MOVOA X2,336(SP) + MOVOA X14,X1 + PADDL X12,X1 + MOVOA X1,X2 + PSLLL $7,X1 + PXOR X1,X5 + PSRLL $25,X2 + PXOR X2,X5 + MOVOA X0,X1 + PADDL X7,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X10 + PSRLL $23,X2 + PXOR X2,X10 + MOVOA X12,X1 + PADDL X5,X1 + MOVOA X1,X2 + PSLLL $9,X1 + PXOR X1,X8 + PSRLL $23,X2 + PXOR X2,X8 + MOVOA X7,X1 + PADDL X10,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X4 + PSRLL $19,X2 + PXOR X2,X4 + MOVOA X5,X1 + PADDL X8,X1 + MOVOA X1,X2 + PSLLL $13,X1 + PXOR X1,X14 + PSRLL $19,X2 + PXOR X2,X14 + MOVOA X10,X1 + PADDL X4,X1 + MOVOA X1,X2 + PSLLL $18,X1 + PXOR X1,X0 + PSRLL $14,X2 + PXOR X2,X0 + MOVOA 320(SP),X1 + MOVOA X0,320(SP) + MOVOA X8,X0 + PADDL X14,X0 + MOVOA X0,X2 + PSLLL $18,X0 + PXOR X0,X12 + PSRLL $14,X2 + PXOR X2,X12 + MOVOA X11,X0 + PADDL X1,X0 + MOVOA X0,X2 + PSLLL $7,X0 + PXOR X0,X6 + PSRLL $25,X2 + PXOR X2,X6 + MOVOA 336(SP),X2 + MOVOA X12,336(SP) + MOVOA X3,X0 + PADDL X2,X0 + MOVOA X0,X12 + PSLLL $7,X0 + PXOR X0,X13 + PSRLL $25,X12 + PXOR X12,X13 + MOVOA X1,X0 + PADDL X6,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X15 + PSRLL $23,X12 + PXOR X12,X15 + MOVOA X2,X0 + PADDL X13,X0 + MOVOA X0,X12 + PSLLL $9,X0 + PXOR X0,X9 + PSRLL $23,X12 + PXOR X12,X9 + MOVOA X6,X0 + PADDL X15,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X11 + PSRLL $19,X12 + PXOR X12,X11 + MOVOA X13,X0 + PADDL X9,X0 + MOVOA X0,X12 + PSLLL $13,X0 + PXOR X0,X3 + PSRLL $19,X12 + PXOR X12,X3 + MOVOA X15,X0 + PADDL X11,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X1 + PSRLL $14,X12 + PXOR X12,X1 + MOVOA X9,X0 + PADDL X3,X0 + MOVOA X0,X12 + PSLLL $18,X0 + PXOR X0,X2 + PSRLL $14,X12 + PXOR X12,X2 + MOVOA 320(SP),X12 + MOVOA 336(SP),X0 + SUBQ $2,DX + JA MAINLOOP1 + PADDL 112(SP),X12 + PADDL 176(SP),X7 + PADDL 224(SP),X10 + PADDL 272(SP),X4 + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 0(SI),DX + XORL 4(SI),CX + XORL 8(SI),R8 + XORL 12(SI),R9 + MOVL DX,0(DI) + MOVL CX,4(DI) + MOVL 
R8,8(DI) + MOVL R9,12(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 64(SI),DX + XORL 68(SI),CX + XORL 72(SI),R8 + XORL 76(SI),R9 + MOVL DX,64(DI) + MOVL CX,68(DI) + MOVL R8,72(DI) + MOVL R9,76(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + PSHUFL $0X39,X12,X12 + PSHUFL $0X39,X7,X7 + PSHUFL $0X39,X10,X10 + PSHUFL $0X39,X4,X4 + XORL 128(SI),DX + XORL 132(SI),CX + XORL 136(SI),R8 + XORL 140(SI),R9 + MOVL DX,128(DI) + MOVL CX,132(DI) + MOVL R8,136(DI) + MOVL R9,140(DI) + MOVD X12,DX + MOVD X7,CX + MOVD X10,R8 + MOVD X4,R9 + XORL 192(SI),DX + XORL 196(SI),CX + XORL 200(SI),R8 + XORL 204(SI),R9 + MOVL DX,192(DI) + MOVL CX,196(DI) + MOVL R8,200(DI) + MOVL R9,204(DI) + PADDL 240(SP),X14 + PADDL 64(SP),X0 + PADDL 128(SP),X5 + PADDL 192(SP),X8 + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 16(SI),DX + XORL 20(SI),CX + XORL 24(SI),R8 + XORL 28(SI),R9 + MOVL DX,16(DI) + MOVL CX,20(DI) + MOVL R8,24(DI) + MOVL R9,28(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 80(SI),DX + XORL 84(SI),CX + XORL 88(SI),R8 + XORL 92(SI),R9 + MOVL DX,80(DI) + MOVL CX,84(DI) + MOVL R8,88(DI) + MOVL R9,92(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + PSHUFL $0X39,X14,X14 + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X5,X5 + PSHUFL $0X39,X8,X8 + XORL 144(SI),DX + XORL 148(SI),CX + XORL 152(SI),R8 + XORL 156(SI),R9 + MOVL DX,144(DI) + MOVL CX,148(DI) + MOVL R8,152(DI) + MOVL R9,156(DI) + MOVD X14,DX + MOVD X0,CX + MOVD X5,R8 + MOVD X8,R9 + XORL 208(SI),DX + XORL 212(SI),CX + XORL 216(SI),R8 + XORL 220(SI),R9 + MOVL DX,208(DI) + MOVL CX,212(DI) + MOVL R8,216(DI) + MOVL R9,220(DI) + PADDL 288(SP),X15 + PADDL 304(SP),X11 + PADDL 80(SP),X1 + PADDL 144(SP),X6 + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 32(SI),DX + XORL 36(SI),CX + XORL 40(SI),R8 + XORL 44(SI),R9 + MOVL DX,32(DI) + MOVL CX,36(DI) + MOVL R8,40(DI) + MOVL R9,44(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 96(SI),DX + XORL 100(SI),CX + XORL 104(SI),R8 + XORL 108(SI),R9 + MOVL DX,96(DI) + MOVL CX,100(DI) + MOVL R8,104(DI) + MOVL R9,108(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + PSHUFL $0X39,X15,X15 + PSHUFL $0X39,X11,X11 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X6,X6 + XORL 160(SI),DX + XORL 164(SI),CX + XORL 168(SI),R8 + XORL 172(SI),R9 + MOVL DX,160(DI) + MOVL CX,164(DI) + MOVL R8,168(DI) + MOVL R9,172(DI) + MOVD X15,DX + MOVD X11,CX + MOVD X1,R8 + MOVD X6,R9 + XORL 224(SI),DX + XORL 228(SI),CX + XORL 232(SI),R8 + XORL 236(SI),R9 + MOVL DX,224(DI) + MOVL CX,228(DI) + MOVL R8,232(DI) + MOVL R9,236(DI) + PADDL 160(SP),X13 + PADDL 208(SP),X9 + PADDL 256(SP),X3 + PADDL 96(SP),X2 + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 48(SI),DX + XORL 52(SI),CX + XORL 56(SI),R8 + XORL 60(SI),R9 + MOVL DX,48(DI) + MOVL CX,52(DI) + MOVL R8,56(DI) + MOVL R9,60(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 112(SI),DX + XORL 116(SI),CX + XORL 
120(SI),R8 + XORL 124(SI),R9 + MOVL DX,112(DI) + MOVL CX,116(DI) + MOVL R8,120(DI) + MOVL R9,124(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + PSHUFL $0X39,X13,X13 + PSHUFL $0X39,X9,X9 + PSHUFL $0X39,X3,X3 + PSHUFL $0X39,X2,X2 + XORL 176(SI),DX + XORL 180(SI),CX + XORL 184(SI),R8 + XORL 188(SI),R9 + MOVL DX,176(DI) + MOVL CX,180(DI) + MOVL R8,184(DI) + MOVL R9,188(DI) + MOVD X13,DX + MOVD X9,CX + MOVD X3,R8 + MOVD X2,R9 + XORL 240(SI),DX + XORL 244(SI),CX + XORL 248(SI),R8 + XORL 252(SI),R9 + MOVL DX,240(DI) + MOVL CX,244(DI) + MOVL R8,248(DI) + MOVL R9,252(DI) + MOVQ 352(SP),R9 + SUBQ $256,R9 + ADDQ $256,SI + ADDQ $256,DI + CMPQ R9,$256 + JAE BYTESATLEAST256 + CMPQ R9,$0 + JBE DONE + BYTESBETWEEN1AND255: + CMPQ R9,$64 + JAE NOCOPY + MOVQ DI,DX + LEAQ 360(SP),DI + MOVQ R9,CX + REP; MOVSB + LEAQ 360(SP),DI + LEAQ 360(SP),SI + NOCOPY: + MOVQ R9,352(SP) + MOVOA 48(SP),X0 + MOVOA 0(SP),X1 + MOVOA 16(SP),X2 + MOVOA 32(SP),X3 + MOVOA X1,X4 + MOVQ $20,CX + MAINLOOP2: + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X3 + PXOR X6,X3 + PADDL X3,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X3,X3 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X1 + PSHUFL $0X4E,X2,X2 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X3,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X1,X1 + PXOR X6,X0 + PADDL X0,X4 + MOVOA X0,X5 + MOVOA X4,X6 + PSLLL $7,X4 + PSRLL $25,X6 + PXOR X4,X1 + PXOR X6,X1 + PADDL X1,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $9,X5 + PSRLL $23,X6 + PXOR X5,X2 + PSHUFL $0X93,X1,X1 + PXOR X6,X2 + PADDL X2,X4 + MOVOA X2,X5 + MOVOA X4,X6 + PSLLL $13,X4 + PSRLL $19,X6 + PXOR X4,X3 + PSHUFL $0X4E,X2,X2 + PXOR X6,X3 + SUBQ $4,CX + PADDL X3,X5 + MOVOA X1,X4 + MOVOA X5,X6 + PSLLL $18,X5 + PXOR X7,X7 + PSRLL $14,X6 + PXOR X5,X0 + PSHUFL $0X39,X3,X3 + PXOR X6,X0 + JA MAINLOOP2 + PADDL 48(SP),X0 + PADDL 0(SP),X1 + PADDL 16(SP),X2 + PADDL 32(SP),X3 + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 0(SI),CX + XORL 48(SI),R8 + XORL 32(SI),R9 + XORL 16(SI),AX + MOVL CX,0(DI) + MOVL R8,48(DI) + MOVL R9,32(DI) + MOVL AX,16(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL $0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 20(SI),CX + XORL 4(SI),R8 + XORL 52(SI),R9 + XORL 36(SI),AX + MOVL CX,20(DI) + MOVL R8,4(DI) + MOVL R9,52(DI) + MOVL AX,36(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + PSHUFL $0X39,X0,X0 + PSHUFL $0X39,X1,X1 + PSHUFL 
$0X39,X2,X2 + PSHUFL $0X39,X3,X3 + XORL 40(SI),CX + XORL 24(SI),R8 + XORL 8(SI),R9 + XORL 56(SI),AX + MOVL CX,40(DI) + MOVL R8,24(DI) + MOVL R9,8(DI) + MOVL AX,56(DI) + MOVD X0,CX + MOVD X1,R8 + MOVD X2,R9 + MOVD X3,AX + XORL 60(SI),CX + XORL 44(SI),R8 + XORL 28(SI),R9 + XORL 12(SI),AX + MOVL CX,60(DI) + MOVL R8,44(DI) + MOVL R9,28(DI) + MOVL AX,12(DI) + MOVQ 352(SP),R9 + MOVL 16(SP),CX + MOVL 36 (SP),R8 + ADDQ $1,CX + SHLQ $32,R8 + ADDQ R8,CX + MOVQ CX,R8 + SHRQ $32,R8 + MOVL CX,16(SP) + MOVL R8, 36 (SP) + CMPQ R9,$64 + JA BYTESATLEAST65 + JAE BYTESATLEAST64 + MOVQ DI,SI + MOVQ DX,DI + MOVQ R9,CX + REP; MOVSB + BYTESATLEAST64: + DONE: + MOVQ R12,SP + RET + BYTESATLEAST65: + SUBQ $64,R9 + ADDQ $64,DI + ADDQ $64,SI + JMP BYTESBETWEEN1AND255 diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go new file mode 100644 index 0000000..9bfc092 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa208.go @@ -0,0 +1,199 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package salsa + +// Core208 applies the Salsa20/8 core function to the 64-byte array in and puts +// the result into the 64-byte array out. The input and output may be the same array. +func Core208(out *[64]byte, in *[64]byte) { + j0 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j1 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j2 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j3 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j4 := uint32(in[16]) | uint32(in[17])<<8 | uint32(in[18])<<16 | uint32(in[19])<<24 + j5 := uint32(in[20]) | uint32(in[21])<<8 | uint32(in[22])<<16 | uint32(in[23])<<24 + j6 := uint32(in[24]) | uint32(in[25])<<8 | uint32(in[26])<<16 | uint32(in[27])<<24 + j7 := uint32(in[28]) | uint32(in[29])<<8 | uint32(in[30])<<16 | uint32(in[31])<<24 + j8 := uint32(in[32]) | uint32(in[33])<<8 | uint32(in[34])<<16 | uint32(in[35])<<24 + j9 := uint32(in[36]) | uint32(in[37])<<8 | uint32(in[38])<<16 | uint32(in[39])<<24 + j10 := uint32(in[40]) | uint32(in[41])<<8 | uint32(in[42])<<16 | uint32(in[43])<<24 + j11 := uint32(in[44]) | uint32(in[45])<<8 | uint32(in[46])<<16 | uint32(in[47])<<24 + j12 := uint32(in[48]) | uint32(in[49])<<8 | uint32(in[50])<<16 | uint32(in[51])<<24 + j13 := uint32(in[52]) | uint32(in[53])<<8 | uint32(in[54])<<16 | uint32(in[55])<<24 + j14 := uint32(in[56]) | uint32(in[57])<<8 | uint32(in[58])<<16 | uint32(in[59])<<24 + j15 := uint32(in[60]) | uint32(in[61])<<8 | uint32(in[62])<<16 | uint32(in[63])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < 8; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | 
u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go new file mode 100644 index 0000000..c34d362 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_amd64.go @@ -0,0 +1,24 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!appengine,!gccgo + +package salsa + +// This function is implemented in salsa2020_amd64.s. + +//go:noescape + +func salsa2020XORKeyStream(out, in *byte, n uint64, nonce, key *byte) + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out may be the same slice but otherwise should not overlap. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). 
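+//
+// out must be at least as long as in; the out[len(in)-1] expression below is
+// a deliberate up-front bounds check that panics if it is not.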
+func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + if len(in) == 0 { + return + } + _ = out[len(in)-1] + salsa2020XORKeyStream(&out[0], &in[0], uint64(len(in)), &counter[0], &key[0]) +} diff --git a/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go new file mode 100644 index 0000000..95f8ca5 --- /dev/null +++ b/vendor/golang.org/x/crypto/salsa20/salsa/salsa20_ref.go @@ -0,0 +1,234 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package salsa + +const rounds = 20 + +// core applies the Salsa20 core function to 16-byte input in, 32-byte key k, +// and 16-byte constant c, and puts the result into 64-byte array out. +func core(out *[64]byte, in *[16]byte, k *[32]byte, c *[16]byte) { + j0 := uint32(c[0]) | uint32(c[1])<<8 | uint32(c[2])<<16 | uint32(c[3])<<24 + j1 := uint32(k[0]) | uint32(k[1])<<8 | uint32(k[2])<<16 | uint32(k[3])<<24 + j2 := uint32(k[4]) | uint32(k[5])<<8 | uint32(k[6])<<16 | uint32(k[7])<<24 + j3 := uint32(k[8]) | uint32(k[9])<<8 | uint32(k[10])<<16 | uint32(k[11])<<24 + j4 := uint32(k[12]) | uint32(k[13])<<8 | uint32(k[14])<<16 | uint32(k[15])<<24 + j5 := uint32(c[4]) | uint32(c[5])<<8 | uint32(c[6])<<16 | uint32(c[7])<<24 + j6 := uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24 + j7 := uint32(in[4]) | uint32(in[5])<<8 | uint32(in[6])<<16 | uint32(in[7])<<24 + j8 := uint32(in[8]) | uint32(in[9])<<8 | uint32(in[10])<<16 | uint32(in[11])<<24 + j9 := uint32(in[12]) | uint32(in[13])<<8 | uint32(in[14])<<16 | uint32(in[15])<<24 + j10 := uint32(c[8]) | uint32(c[9])<<8 | uint32(c[10])<<16 | uint32(c[11])<<24 + j11 := uint32(k[16]) | uint32(k[17])<<8 | uint32(k[18])<<16 | uint32(k[19])<<24 + j12 := uint32(k[20]) | uint32(k[21])<<8 | uint32(k[22])<<16 | uint32(k[23])<<24 + j13 := uint32(k[24]) | uint32(k[25])<<8 | uint32(k[26])<<16 | uint32(k[27])<<24 + j14 := uint32(k[28]) | uint32(k[29])<<8 | uint32(k[30])<<16 | uint32(k[31])<<24 + j15 := uint32(c[12]) | uint32(c[13])<<8 | uint32(c[14])<<16 | uint32(c[15])<<24 + + x0, x1, x2, x3, x4, x5, x6, x7, x8 := j0, j1, j2, j3, j4, j5, j6, j7, j8 + x9, x10, x11, x12, x13, x14, x15 := j9, j10, j11, j12, j13, j14, j15 + + for i := 0; i < rounds; i += 2 { + u := x0 + x12 + x4 ^= u<<7 | u>>(32-7) + u = x4 + x0 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x4 + x12 ^= u<<13 | u>>(32-13) + u = x12 + x8 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x1 + x9 ^= u<<7 | u>>(32-7) + u = x9 + x5 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x9 + x1 ^= u<<13 | u>>(32-13) + u = x1 + x13 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x6 + x14 ^= u<<7 | u>>(32-7) + u = x14 + x10 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x14 + x6 ^= u<<13 | u>>(32-13) + u = x6 + x2 + x10 ^= u<<18 | u>>(32-18) + + u = x15 + x11 + x3 ^= u<<7 | u>>(32-7) + u = x3 + x15 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x3 + x11 ^= u<<13 | u>>(32-13) + u = x11 + x7 + x15 ^= u<<18 | u>>(32-18) + + u = x0 + x3 + x1 ^= u<<7 | u>>(32-7) + u = x1 + x0 + x2 ^= u<<9 | u>>(32-9) + u = x2 + x1 + x3 ^= u<<13 | u>>(32-13) + u = x3 + x2 + x0 ^= u<<18 | u>>(32-18) + + u = x5 + x4 + x6 ^= u<<7 | u>>(32-7) + u = x6 + x5 + x7 ^= u<<9 | u>>(32-9) + u = x7 + x6 + x4 ^= u<<13 | u>>(32-13) + u = x4 + x7 + x5 ^= u<<18 | u>>(32-18) + + u = x10 + x9 + x11 ^= u<<7 | u>>(32-7) + u = x11 + x10 + x8 ^= u<<9 | u>>(32-9) + u = x8 + x11 + x9 ^= u<<13 | u>>(32-13) + u = x9 + x8 + x10 ^= u<<18 | 
u>>(32-18) + + u = x15 + x14 + x12 ^= u<<7 | u>>(32-7) + u = x12 + x15 + x13 ^= u<<9 | u>>(32-9) + u = x13 + x12 + x14 ^= u<<13 | u>>(32-13) + u = x14 + x13 + x15 ^= u<<18 | u>>(32-18) + } + x0 += j0 + x1 += j1 + x2 += j2 + x3 += j3 + x4 += j4 + x5 += j5 + x6 += j6 + x7 += j7 + x8 += j8 + x9 += j9 + x10 += j10 + x11 += j11 + x12 += j12 + x13 += j13 + x14 += j14 + x15 += j15 + + out[0] = byte(x0) + out[1] = byte(x0 >> 8) + out[2] = byte(x0 >> 16) + out[3] = byte(x0 >> 24) + + out[4] = byte(x1) + out[5] = byte(x1 >> 8) + out[6] = byte(x1 >> 16) + out[7] = byte(x1 >> 24) + + out[8] = byte(x2) + out[9] = byte(x2 >> 8) + out[10] = byte(x2 >> 16) + out[11] = byte(x2 >> 24) + + out[12] = byte(x3) + out[13] = byte(x3 >> 8) + out[14] = byte(x3 >> 16) + out[15] = byte(x3 >> 24) + + out[16] = byte(x4) + out[17] = byte(x4 >> 8) + out[18] = byte(x4 >> 16) + out[19] = byte(x4 >> 24) + + out[20] = byte(x5) + out[21] = byte(x5 >> 8) + out[22] = byte(x5 >> 16) + out[23] = byte(x5 >> 24) + + out[24] = byte(x6) + out[25] = byte(x6 >> 8) + out[26] = byte(x6 >> 16) + out[27] = byte(x6 >> 24) + + out[28] = byte(x7) + out[29] = byte(x7 >> 8) + out[30] = byte(x7 >> 16) + out[31] = byte(x7 >> 24) + + out[32] = byte(x8) + out[33] = byte(x8 >> 8) + out[34] = byte(x8 >> 16) + out[35] = byte(x8 >> 24) + + out[36] = byte(x9) + out[37] = byte(x9 >> 8) + out[38] = byte(x9 >> 16) + out[39] = byte(x9 >> 24) + + out[40] = byte(x10) + out[41] = byte(x10 >> 8) + out[42] = byte(x10 >> 16) + out[43] = byte(x10 >> 24) + + out[44] = byte(x11) + out[45] = byte(x11 >> 8) + out[46] = byte(x11 >> 16) + out[47] = byte(x11 >> 24) + + out[48] = byte(x12) + out[49] = byte(x12 >> 8) + out[50] = byte(x12 >> 16) + out[51] = byte(x12 >> 24) + + out[52] = byte(x13) + out[53] = byte(x13 >> 8) + out[54] = byte(x13 >> 16) + out[55] = byte(x13 >> 24) + + out[56] = byte(x14) + out[57] = byte(x14 >> 8) + out[58] = byte(x14 >> 16) + out[59] = byte(x14 >> 24) + + out[60] = byte(x15) + out[61] = byte(x15 >> 8) + out[62] = byte(x15 >> 16) + out[63] = byte(x15 >> 24) +} + +// XORKeyStream crypts bytes from in to out using the given key and counters. +// In and out may be the same slice but otherwise should not overlap. Counter +// contains the raw salsa20 counter bytes (both nonce and block counter). +func XORKeyStream(out, in []byte, counter *[16]byte, key *[32]byte) { + var block [64]byte + var counterCopy [16]byte + copy(counterCopy[:], counter[:]) + + for len(in) >= 64 { + core(&block, &counterCopy, key, &Sigma) + for i, x := range block { + out[i] = in[i] ^ x + } + u := uint32(1) + for i := 8; i < 16; i++ { + u += uint32(counterCopy[i]) + counterCopy[i] = byte(u) + u >>= 8 + } + in = in[64:] + out = out[64:] + } + + if len(in) > 0 { + core(&block, &counterCopy, key, &Sigma) + for i, v := range in { + out[i] = v ^ block[i] + } + } +} diff --git a/vendor/golang.org/x/net/context/context.go b/vendor/golang.org/x/net/context/context.go new file mode 100644 index 0000000..d3681ab --- /dev/null +++ b/vendor/golang.org/x/net/context/context.go @@ -0,0 +1,54 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package context defines the Context type, which carries deadlines, +// cancelation signals, and other request-scoped values across API boundaries +// and between processes. +// +// Incoming requests to a server should create a Context, and outgoing calls to +// servers should accept a Context. 
The chain of function calls between them must
+// propagate the Context, optionally replacing it with a modified copy created
+// using WithDeadline, WithTimeout, WithCancel, or WithValue.
+//
+// Programs that use Contexts should follow these rules to keep interfaces
+// consistent across packages and enable static analysis tools to check context
+// propagation:
+//
+// Do not store Contexts inside a struct type; instead, pass a Context
+// explicitly to each function that needs it. The Context should be the first
+// parameter, typically named ctx:
+//
+// 	func DoSomething(ctx context.Context, arg Arg) error {
+// 		// ... use ctx ...
+// 	}
+//
+// Do not pass a nil Context, even if a function permits it. Pass context.TODO
+// if you are unsure about which Context to use.
+//
+// Use context Values only for request-scoped data that transits processes and
+// APIs, not for passing optional parameters to functions.
+//
+// The same Context may be passed to functions running in different goroutines;
+// Contexts are safe for simultaneous use by multiple goroutines.
+//
+// See http://blog.golang.org/context for example code for a server that uses
+// Contexts.
+package context // import "golang.org/x/net/context"
+
+// Background returns a non-nil, empty Context. It is never canceled, has no
+// values, and has no deadline. It is typically used by the main function,
+// initialization, and tests, and as the top-level Context for incoming
+// requests.
+func Background() Context {
+	return background
+}
+
+// TODO returns a non-nil, empty Context. Code should use context.TODO when
+// it's unclear which Context to use or it is not yet available (because the
+// surrounding function has not yet been extended to accept a Context
+// parameter). TODO is recognized by static analysis tools that determine
+// whether Contexts are propagated correctly in a program.
+func TODO() Context {
+	return todo
+}
diff --git a/vendor/golang.org/x/net/context/context_test.go b/vendor/golang.org/x/net/context/context_test.go
new file mode 100644
index 0000000..6284413
--- /dev/null
+++ b/vendor/golang.org/x/net/context/context_test.go
@@ -0,0 +1,583 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build !go1.7
+
+package context
+
+import (
+	"fmt"
+	"math/rand"
+	"runtime"
+	"strings"
+	"sync"
+	"testing"
+	"time"
+)
+
+// otherContext is a Context that's not one of the types defined in context.go.
+// This lets us test code paths that differ based on the underlying type of the
+// Context.
+type otherContext struct { + Context +} + +func TestBackground(t *testing.T) { + c := Background() + if c == nil { + t.Fatalf("Background returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.Background"; got != want { + t.Errorf("Background().String() = %q want %q", got, want) + } +} + +func TestTODO(t *testing.T) { + c := TODO() + if c == nil { + t.Fatalf("TODO returned nil") + } + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + if got, want := fmt.Sprint(c), "context.TODO"; got != want { + t.Errorf("TODO().String() = %q want %q", got, want) + } +} + +func TestWithCancel(t *testing.T) { + c1, cancel := WithCancel(Background()) + + if got, want := fmt.Sprint(c1), "context.Background.WithCancel"; got != want { + t.Errorf("c1.String() = %q want %q", got, want) + } + + o := otherContext{c1} + c2, _ := WithCancel(o) + contexts := []Context{c1, o, c2} + + for i, c := range contexts { + if d := c.Done(); d == nil { + t.Errorf("c[%d].Done() == %v want non-nil", i, d) + } + if e := c.Err(); e != nil { + t.Errorf("c[%d].Err() == %v want nil", i, e) + } + + select { + case x := <-c.Done(): + t.Errorf("<-c.Done() == %v want nothing (it should block)", x) + default: + } + } + + cancel() + time.Sleep(100 * time.Millisecond) // let cancelation propagate + + for i, c := range contexts { + select { + case <-c.Done(): + default: + t.Errorf("<-c[%d].Done() blocked, but shouldn't have", i) + } + if e := c.Err(); e != Canceled { + t.Errorf("c[%d].Err() == %v want %v", i, e, Canceled) + } + } +} + +func TestParentFinishesChild(t *testing.T) { + // Context tree: + // parent -> cancelChild + // parent -> valueChild -> timerChild + parent, cancel := WithCancel(Background()) + cancelChild, stop := WithCancel(parent) + defer stop() + valueChild := WithValue(parent, "key", "value") + timerChild, stop := WithTimeout(valueChild, 10000*time.Hour) + defer stop() + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-cancelChild.Done(): + t.Errorf("<-cancelChild.Done() == %v want nothing (it should block)", x) + case x := <-timerChild.Done(): + t.Errorf("<-timerChild.Done() == %v want nothing (it should block)", x) + case x := <-valueChild.Done(): + t.Errorf("<-valueChild.Done() == %v want nothing (it should block)", x) + default: + } + + // The parent's children should contain the two cancelable children. + pc := parent.(*cancelCtx) + cc := cancelChild.(*cancelCtx) + tc := timerChild.(*timerCtx) + pc.mu.Lock() + if len(pc.children) != 2 || !pc.children[cc] || !pc.children[tc] { + t.Errorf("bad linkage: pc.children = %v, want %v and %v", + pc.children, cc, tc) + } + pc.mu.Unlock() + + if p, ok := parentCancelCtx(cc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(cancelChild.Context) = %v, %v want %v, true", p, ok, pc) + } + if p, ok := parentCancelCtx(tc.Context); !ok || p != pc { + t.Errorf("bad linkage: parentCancelCtx(timerChild.Context) = %v, %v want %v, true", p, ok, pc) + } + + cancel() + + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("pc.cancel didn't clear pc.children = %v", pc.children) + } + pc.mu.Unlock() + + // parent and children should all be finished. 
+ check := func(ctx Context, name string) { + select { + case <-ctx.Done(): + default: + t.Errorf("<-%s.Done() blocked, but shouldn't have", name) + } + if e := ctx.Err(); e != Canceled { + t.Errorf("%s.Err() == %v want %v", name, e, Canceled) + } + } + check(parent, "parent") + check(cancelChild, "cancelChild") + check(valueChild, "valueChild") + check(timerChild, "timerChild") + + // WithCancel should return a canceled context on a canceled parent. + precanceledChild := WithValue(parent, "key", "value") + select { + case <-precanceledChild.Done(): + default: + t.Errorf("<-precanceledChild.Done() blocked, but shouldn't have") + } + if e := precanceledChild.Err(); e != Canceled { + t.Errorf("precanceledChild.Err() == %v want %v", e, Canceled) + } +} + +func TestChildFinishesFirst(t *testing.T) { + cancelable, stop := WithCancel(Background()) + defer stop() + for _, parent := range []Context{Background(), cancelable} { + child, cancel := WithCancel(parent) + + select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + case x := <-child.Done(): + t.Errorf("<-child.Done() == %v want nothing (it should block)", x) + default: + } + + cc := child.(*cancelCtx) + pc, pcok := parent.(*cancelCtx) // pcok == false when parent == Background() + if p, ok := parentCancelCtx(cc.Context); ok != pcok || (ok && pc != p) { + t.Errorf("bad linkage: parentCancelCtx(cc.Context) = %v, %v want %v, %v", p, ok, pc, pcok) + } + + if pcok { + pc.mu.Lock() + if len(pc.children) != 1 || !pc.children[cc] { + t.Errorf("bad linkage: pc.children = %v, cc = %v", pc.children, cc) + } + pc.mu.Unlock() + } + + cancel() + + if pcok { + pc.mu.Lock() + if len(pc.children) != 0 { + t.Errorf("child's cancel didn't remove self from pc.children = %v", pc.children) + } + pc.mu.Unlock() + } + + // child should be finished. + select { + case <-child.Done(): + default: + t.Errorf("<-child.Done() blocked, but shouldn't have") + } + if e := child.Err(); e != Canceled { + t.Errorf("child.Err() == %v want %v", e, Canceled) + } + + // parent should not be finished. 
+ select { + case x := <-parent.Done(): + t.Errorf("<-parent.Done() == %v want nothing (it should block)", x) + default: + } + if e := parent.Err(); e != nil { + t.Errorf("parent.Err() == %v want nil", e) + } + } +} + +func testDeadline(c Context, wait time.Duration, t *testing.T) { + select { + case <-time.After(wait): + t.Fatalf("context should have timed out") + case <-c.Done(): + } + if e := c.Err(); e != DeadlineExceeded { + t.Errorf("c.Err() == %v want %v", e, DeadlineExceeded) + } +} + +func TestDeadline(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithDeadline(Background(), time.Now().Add(1*timeUnit)) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithDeadline(Background(), time.Now().Add(1*timeUnit)) + o = otherContext{c} + c, _ = WithDeadline(o, time.Now().Add(3*timeUnit)) + testDeadline(c, 2*timeUnit, t) +} + +func TestTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 1*timeUnit) + if got, prefix := fmt.Sprint(c), "context.Background.WithDeadline("; !strings.HasPrefix(got, prefix) { + t.Errorf("c.String() = %q want prefix %q", got, prefix) + } + testDeadline(c, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o := otherContext{c} + testDeadline(o, 2*timeUnit, t) + + c, _ = WithTimeout(Background(), 1*timeUnit) + o = otherContext{c} + c, _ = WithTimeout(o, 3*timeUnit) + testDeadline(c, 2*timeUnit, t) +} + +func TestCanceledTimeout(t *testing.T) { + t.Parallel() + const timeUnit = 500 * time.Millisecond + c, _ := WithTimeout(Background(), 2*timeUnit) + o := otherContext{c} + c, cancel := WithTimeout(o, 4*timeUnit) + cancel() + time.Sleep(1 * timeUnit) // let cancelation propagate + select { + case <-c.Done(): + default: + t.Errorf("<-c.Done() blocked, but shouldn't have") + } + if e := c.Err(); e != Canceled { + t.Errorf("c.Err() == %v want %v", e, Canceled) + } +} + +type key1 int +type key2 int + +var k1 = key1(1) +var k2 = key2(1) // same int as k1, different type +var k3 = key2(3) // same type as k2, different int + +func TestValues(t *testing.T) { + check := func(c Context, nm, v1, v2, v3 string) { + if v, ok := c.Value(k1).(string); ok == (len(v1) == 0) || v != v1 { + t.Errorf(`%s.Value(k1).(string) = %q, %t want %q, %t`, nm, v, ok, v1, len(v1) != 0) + } + if v, ok := c.Value(k2).(string); ok == (len(v2) == 0) || v != v2 { + t.Errorf(`%s.Value(k2).(string) = %q, %t want %q, %t`, nm, v, ok, v2, len(v2) != 0) + } + if v, ok := c.Value(k3).(string); ok == (len(v3) == 0) || v != v3 { + t.Errorf(`%s.Value(k3).(string) = %q, %t want %q, %t`, nm, v, ok, v3, len(v3) != 0) + } + } + + c0 := Background() + check(c0, "c0", "", "", "") + + c1 := WithValue(Background(), k1, "c1k1") + check(c1, "c1", "c1k1", "", "") + + if got, want := fmt.Sprint(c1), `context.Background.WithValue(1, "c1k1")`; got != want { + t.Errorf("c.String() = %q want %q", got, want) + } + + c2 := WithValue(c1, k2, "c2k2") + check(c2, "c2", "c1k1", "c2k2", "") + + c3 := WithValue(c2, k3, "c3k3") + check(c3, "c2", "c1k1", "c2k2", "c3k3") + + c4 := WithValue(c3, k1, nil) + check(c4, "c4", "", "c2k2", "c3k3") + + o0 := otherContext{Background()} + check(o0, "o0", "", "", "") + + o1 := 
otherContext{WithValue(Background(), k1, "c1k1")} + check(o1, "o1", "c1k1", "", "") + + o2 := WithValue(o1, k2, "o2k2") + check(o2, "o2", "c1k1", "o2k2", "") + + o3 := otherContext{c4} + check(o3, "o3", "", "c2k2", "c3k3") + + o4 := WithValue(o3, k3, nil) + check(o4, "o4", "", "c2k2", "") +} + +func TestAllocs(t *testing.T) { + bg := Background() + for _, test := range []struct { + desc string + f func() + limit float64 + gccgoLimit float64 + }{ + { + desc: "Background()", + f: func() { Background() }, + limit: 0, + gccgoLimit: 0, + }, + { + desc: fmt.Sprintf("WithValue(bg, %v, nil)", k1), + f: func() { + c := WithValue(bg, k1, nil) + c.Value(k1) + }, + limit: 3, + gccgoLimit: 3, + }, + { + desc: "WithTimeout(bg, 15*time.Millisecond)", + f: func() { + c, _ := WithTimeout(bg, 15*time.Millisecond) + <-c.Done() + }, + limit: 8, + gccgoLimit: 16, + }, + { + desc: "WithCancel(bg)", + f: func() { + c, cancel := WithCancel(bg) + cancel() + <-c.Done() + }, + limit: 5, + gccgoLimit: 8, + }, + { + desc: "WithTimeout(bg, 100*time.Millisecond)", + f: func() { + c, cancel := WithTimeout(bg, 100*time.Millisecond) + cancel() + <-c.Done() + }, + limit: 8, + gccgoLimit: 25, + }, + } { + limit := test.limit + if runtime.Compiler == "gccgo" { + // gccgo does not yet do escape analysis. + // TODO(iant): Remove this when gccgo does do escape analysis. + limit = test.gccgoLimit + } + if n := testing.AllocsPerRun(100, test.f); n > limit { + t.Errorf("%s allocs = %f want %d", test.desc, n, int(limit)) + } + } +} + +func TestSimultaneousCancels(t *testing.T) { + root, cancel := WithCancel(Background()) + m := map[Context]CancelFunc{root: cancel} + q := []Context{root} + // Create a tree of contexts. + for len(q) != 0 && len(m) < 100 { + parent := q[0] + q = q[1:] + for i := 0; i < 4; i++ { + ctx, cancel := WithCancel(parent) + m[ctx] = cancel + q = append(q, ctx) + } + } + // Start all the cancels in a random order. + var wg sync.WaitGroup + wg.Add(len(m)) + for _, cancel := range m { + go func(cancel CancelFunc) { + cancel() + wg.Done() + }(cancel) + } + // Wait on all the contexts in a random order. + for ctx := range m { + select { + case <-ctx.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for <-ctx.Done(); stacks:\n%s", buf[:n]) + } + } + // Wait for all the cancel functions to return. + done := make(chan struct{}) + go func() { + wg.Wait() + close(done) + }() + select { + case <-done: + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for cancel functions; stacks:\n%s", buf[:n]) + } +} + +func TestInterlockedCancels(t *testing.T) { + parent, cancelParent := WithCancel(Background()) + child, cancelChild := WithCancel(parent) + go func() { + parent.Done() + cancelChild() + }() + cancelParent() + select { + case <-child.Done(): + case <-time.After(1 * time.Second): + buf := make([]byte, 10<<10) + n := runtime.Stack(buf, true) + t.Fatalf("timed out waiting for child.Done(); stacks:\n%s", buf[:n]) + } +} + +func TestLayersCancel(t *testing.T) { + testLayers(t, time.Now().UnixNano(), false) +} + +func TestLayersTimeout(t *testing.T) { + testLayers(t, time.Now().UnixNano(), true) +} + +func testLayers(t *testing.T, seed int64, testTimeout bool) { + rand.Seed(seed) + errorf := func(format string, a ...interface{}) { + t.Errorf(fmt.Sprintf("seed=%d: %s", seed, format), a...) 
+ } + const ( + timeout = 200 * time.Millisecond + minLayers = 30 + ) + type value int + var ( + vals []*value + cancels []CancelFunc + numTimers int + ctx = Background() + ) + for i := 0; i < minLayers || numTimers == 0 || len(cancels) == 0 || len(vals) == 0; i++ { + switch rand.Intn(3) { + case 0: + v := new(value) + ctx = WithValue(ctx, v, v) + vals = append(vals, v) + case 1: + var cancel CancelFunc + ctx, cancel = WithCancel(ctx) + cancels = append(cancels, cancel) + case 2: + var cancel CancelFunc + ctx, cancel = WithTimeout(ctx, timeout) + cancels = append(cancels, cancel) + numTimers++ + } + } + checkValues := func(when string) { + for _, key := range vals { + if val := ctx.Value(key).(*value); key != val { + errorf("%s: ctx.Value(%p) = %p want %p", when, key, val, key) + } + } + } + select { + case <-ctx.Done(): + errorf("ctx should not be canceled yet") + default: + } + if s, prefix := fmt.Sprint(ctx), "context.Background."; !strings.HasPrefix(s, prefix) { + t.Errorf("ctx.String() = %q want prefix %q", s, prefix) + } + t.Log(ctx) + checkValues("before cancel") + if testTimeout { + select { + case <-ctx.Done(): + case <-time.After(timeout + 100*time.Millisecond): + errorf("ctx should have timed out") + } + checkValues("after timeout") + } else { + cancel := cancels[rand.Intn(len(cancels))] + cancel() + select { + case <-ctx.Done(): + default: + errorf("ctx should be canceled") + } + checkValues("after cancel") + } +} + +func TestCancelRemoves(t *testing.T) { + checkChildren := func(when string, ctx Context, want int) { + if got := len(ctx.(*cancelCtx).children); got != want { + t.Errorf("%s: context has %d children, want %d", when, got, want) + } + } + + ctx, _ := WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel := WithCancel(ctx) + checkChildren("with WithCancel child ", ctx, 1) + cancel() + checkChildren("after cancelling WithCancel child", ctx, 0) + + ctx, _ = WithCancel(Background()) + checkChildren("after creation", ctx, 0) + _, cancel = WithTimeout(ctx, 60*time.Minute) + checkChildren("with WithTimeout child ", ctx, 1) + cancel() + checkChildren("after cancelling WithTimeout child", ctx, 0) +} diff --git a/vendor/golang.org/x/net/context/go17.go b/vendor/golang.org/x/net/context/go17.go new file mode 100644 index 0000000..d20f52b --- /dev/null +++ b/vendor/golang.org/x/net/context/go17.go @@ -0,0 +1,72 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.7 + +package context + +import ( + "context" // standard library's context, as of Go 1.7 + "time" +) + +var ( + todo = context.TODO() + background = context.Background() +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = context.Canceled + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = context.DeadlineExceeded + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. 
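+//
+// An illustrative sketch (doWork stands in for any hypothetical callee):
+//
+// 	ctx, cancel := context.WithCancel(context.Background())
+// 	defer cancel()
+// 	err := doWork(ctx)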
+func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + ctx, f := context.WithCancel(parent) + return ctx, CancelFunc(f) +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + ctx, f := context.WithDeadline(parent, deadline) + return ctx, CancelFunc(f) +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return context.WithValue(parent, key, val) +} diff --git a/vendor/golang.org/x/net/context/go19.go b/vendor/golang.org/x/net/context/go19.go new file mode 100644 index 0000000..d88bd1d --- /dev/null +++ b/vendor/golang.org/x/net/context/go19.go @@ -0,0 +1,20 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.9 + +package context + +import "context" // standard library's context, as of Go 1.7 + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context = context.Context + +// A CancelFunc tells an operation to abandon its work. +// A CancelFunc does not wait for the work to stop. +// After the first call, subsequent calls to a CancelFunc do nothing. +type CancelFunc = context.CancelFunc diff --git a/vendor/golang.org/x/net/context/pre_go17.go b/vendor/golang.org/x/net/context/pre_go17.go new file mode 100644 index 0000000..0f35592 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go17.go @@ -0,0 +1,300 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.7 + +package context + +import ( + "errors" + "fmt" + "sync" + "time" +) + +// An emptyCtx is never canceled, has no values, and has no deadline. It is not +// struct{}, since vars of this type must have distinct addresses. 
+type emptyCtx int + +func (*emptyCtx) Deadline() (deadline time.Time, ok bool) { + return +} + +func (*emptyCtx) Done() <-chan struct{} { + return nil +} + +func (*emptyCtx) Err() error { + return nil +} + +func (*emptyCtx) Value(key interface{}) interface{} { + return nil +} + +func (e *emptyCtx) String() string { + switch e { + case background: + return "context.Background" + case todo: + return "context.TODO" + } + return "unknown empty Context" +} + +var ( + background = new(emptyCtx) + todo = new(emptyCtx) +) + +// Canceled is the error returned by Context.Err when the context is canceled. +var Canceled = errors.New("context canceled") + +// DeadlineExceeded is the error returned by Context.Err when the context's +// deadline passes. +var DeadlineExceeded = errors.New("context deadline exceeded") + +// WithCancel returns a copy of parent with a new Done channel. The returned +// context's Done channel is closed when the returned cancel function is called +// or when the parent context's Done channel is closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithCancel(parent Context) (ctx Context, cancel CancelFunc) { + c := newCancelCtx(parent) + propagateCancel(parent, c) + return c, func() { c.cancel(true, Canceled) } +} + +// newCancelCtx returns an initialized cancelCtx. +func newCancelCtx(parent Context) *cancelCtx { + return &cancelCtx{ + Context: parent, + done: make(chan struct{}), + } +} + +// propagateCancel arranges for child to be canceled when parent is. +func propagateCancel(parent Context, child canceler) { + if parent.Done() == nil { + return // parent is never canceled + } + if p, ok := parentCancelCtx(parent); ok { + p.mu.Lock() + if p.err != nil { + // parent has already been canceled + child.cancel(false, p.err) + } else { + if p.children == nil { + p.children = make(map[canceler]bool) + } + p.children[child] = true + } + p.mu.Unlock() + } else { + go func() { + select { + case <-parent.Done(): + child.cancel(false, parent.Err()) + case <-child.Done(): + } + }() + } +} + +// parentCancelCtx follows a chain of parent references until it finds a +// *cancelCtx. This function understands how each of the concrete types in this +// package represents its parent. +func parentCancelCtx(parent Context) (*cancelCtx, bool) { + for { + switch c := parent.(type) { + case *cancelCtx: + return c, true + case *timerCtx: + return c.cancelCtx, true + case *valueCtx: + parent = c.Context + default: + return nil, false + } + } +} + +// removeChild removes a context from its parent. +func removeChild(parent Context, child canceler) { + p, ok := parentCancelCtx(parent) + if !ok { + return + } + p.mu.Lock() + if p.children != nil { + delete(p.children, child) + } + p.mu.Unlock() +} + +// A canceler is a context type that can be canceled directly. The +// implementations are *cancelCtx and *timerCtx. +type canceler interface { + cancel(removeFromParent bool, err error) + Done() <-chan struct{} +} + +// A cancelCtx can be canceled. When canceled, it also cancels any children +// that implement canceler. +type cancelCtx struct { + Context + + done chan struct{} // closed by the first cancel call. 
+ + mu sync.Mutex + children map[canceler]bool // set to nil by the first cancel call + err error // set to non-nil by the first cancel call +} + +func (c *cancelCtx) Done() <-chan struct{} { + return c.done +} + +func (c *cancelCtx) Err() error { + c.mu.Lock() + defer c.mu.Unlock() + return c.err +} + +func (c *cancelCtx) String() string { + return fmt.Sprintf("%v.WithCancel", c.Context) +} + +// cancel closes c.done, cancels each of c's children, and, if +// removeFromParent is true, removes c from its parent's children. +func (c *cancelCtx) cancel(removeFromParent bool, err error) { + if err == nil { + panic("context: internal error: missing cancel error") + } + c.mu.Lock() + if c.err != nil { + c.mu.Unlock() + return // already canceled + } + c.err = err + close(c.done) + for child := range c.children { + // NOTE: acquiring the child's lock while holding parent's lock. + child.cancel(false, err) + } + c.children = nil + c.mu.Unlock() + + if removeFromParent { + removeChild(c.Context, c) + } +} + +// WithDeadline returns a copy of the parent context with the deadline adjusted +// to be no later than d. If the parent's deadline is already earlier than d, +// WithDeadline(parent, d) is semantically equivalent to parent. The returned +// context's Done channel is closed when the deadline expires, when the returned +// cancel function is called, or when the parent context's Done channel is +// closed, whichever happens first. +// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete. +func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) { + if cur, ok := parent.Deadline(); ok && cur.Before(deadline) { + // The current deadline is already sooner than the new one. + return WithCancel(parent) + } + c := &timerCtx{ + cancelCtx: newCancelCtx(parent), + deadline: deadline, + } + propagateCancel(parent, c) + d := deadline.Sub(time.Now()) + if d <= 0 { + c.cancel(true, DeadlineExceeded) // deadline has already passed + return c, func() { c.cancel(true, Canceled) } + } + c.mu.Lock() + defer c.mu.Unlock() + if c.err == nil { + c.timer = time.AfterFunc(d, func() { + c.cancel(true, DeadlineExceeded) + }) + } + return c, func() { c.cancel(true, Canceled) } +} + +// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to +// implement Done and Err. It implements cancel by stopping its timer then +// delegating to cancelCtx.cancel. +type timerCtx struct { + *cancelCtx + timer *time.Timer // Under cancelCtx.mu. + + deadline time.Time +} + +func (c *timerCtx) Deadline() (deadline time.Time, ok bool) { + return c.deadline, true +} + +func (c *timerCtx) String() string { + return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now())) +} + +func (c *timerCtx) cancel(removeFromParent bool, err error) { + c.cancelCtx.cancel(false, err) + if removeFromParent { + // Remove this timerCtx from its parent cancelCtx's children. + removeChild(c.cancelCtx.Context, c) + } + c.mu.Lock() + if c.timer != nil { + c.timer.Stop() + c.timer = nil + } + c.mu.Unlock() +} + +// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)). 
+// +// Canceling this context releases resources associated with it, so code should +// call cancel as soon as the operations running in this Context complete: +// +// func slowOperationWithTimeout(ctx context.Context) (Result, error) { +// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond) +// defer cancel() // releases resources if slowOperation completes before timeout elapses +// return slowOperation(ctx) +// } +func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) { + return WithDeadline(parent, time.Now().Add(timeout)) +} + +// WithValue returns a copy of parent in which the value associated with key is +// val. +// +// Use context Values only for request-scoped data that transits processes and +// APIs, not for passing optional parameters to functions. +func WithValue(parent Context, key interface{}, val interface{}) Context { + return &valueCtx{parent, key, val} +} + +// A valueCtx carries a key-value pair. It implements Value for that key and +// delegates all other calls to the embedded Context. +type valueCtx struct { + Context + key, val interface{} +} + +func (c *valueCtx) String() string { + return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val) +} + +func (c *valueCtx) Value(key interface{}) interface{} { + if c.key == key { + return c.val + } + return c.Context.Value(key) +} diff --git a/vendor/golang.org/x/net/context/pre_go19.go b/vendor/golang.org/x/net/context/pre_go19.go new file mode 100644 index 0000000..b105f80 --- /dev/null +++ b/vendor/golang.org/x/net/context/pre_go19.go @@ -0,0 +1,109 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !go1.9 + +package context + +import "time" + +// A Context carries a deadline, a cancelation signal, and other values across +// API boundaries. +// +// Context's methods may be called by multiple goroutines simultaneously. +type Context interface { + // Deadline returns the time when work done on behalf of this context + // should be canceled. Deadline returns ok==false when no deadline is + // set. Successive calls to Deadline return the same results. + Deadline() (deadline time.Time, ok bool) + + // Done returns a channel that's closed when work done on behalf of this + // context should be canceled. Done may return nil if this context can + // never be canceled. Successive calls to Done return the same value. + // + // WithCancel arranges for Done to be closed when cancel is called; + // WithDeadline arranges for Done to be closed when the deadline + // expires; WithTimeout arranges for Done to be closed when the timeout + // elapses. + // + // Done is provided for use in select statements: + // + // // Stream generates values with DoSomething and sends them to out + // // until DoSomething returns an error or ctx.Done is closed. + // func Stream(ctx context.Context, out chan<- Value) error { + // for { + // v, err := DoSomething(ctx) + // if err != nil { + // return err + // } + // select { + // case <-ctx.Done(): + // return ctx.Err() + // case out <- v: + // } + // } + // } + // + // See http://blog.golang.org/pipelines for more examples of how to use + // a Done channel for cancelation. + Done() <-chan struct{} + + // Err returns a non-nil error value after Done is closed. Err returns + // Canceled if the context was canceled or DeadlineExceeded if the + // context's deadline passed. No other values for Err are defined. 
+ // After Done is closed, successive calls to Err return the same value.
+ Err() error
+
+ // Value returns the value associated with this context for key, or nil
+ // if no value is associated with key. Successive calls to Value with
+ // the same key return the same result.
+ //
+ // Use context values only for request-scoped data that transits
+ // processes and API boundaries, not for passing optional parameters to
+ // functions.
+ //
+ // A key identifies a specific value in a Context. Functions that wish
+ // to store values in Context typically allocate a key in a global
+ // variable then use that key as the argument to context.WithValue and
+ // Context.Value. A key can be any type that supports equality;
+ // packages should define keys as an unexported type to avoid
+ // collisions.
+ //
+ // Packages that define a Context key should provide type-safe accessors
+ // for the values stored using that key:
+ //
+ // // Package user defines a User type that's stored in Contexts.
+ // package user
+ //
+ // import "golang.org/x/net/context"
+ //
+ // // User is the type of value stored in the Contexts.
+ // type User struct {...}
+ //
+ // // key is an unexported type for keys defined in this package.
+ // // This prevents collisions with keys defined in other packages.
+ // type key int
+ //
+ // // userKey is the key for user.User values in Contexts. It is
+ // // unexported; clients use user.NewContext and user.FromContext
+ // // instead of using this key directly.
+ // var userKey key = 0
+ //
+ // // NewContext returns a new Context that carries value u.
+ // func NewContext(ctx context.Context, u *User) context.Context {
+ // return context.WithValue(ctx, userKey, u)
+ // }
+ //
+ // // FromContext returns the User value stored in ctx, if any.
+ // func FromContext(ctx context.Context) (*User, bool) {
+ // u, ok := ctx.Value(userKey).(*User)
+ // return u, ok
+ // }
+ Value(key interface{}) interface{}
+}
+
+// A CancelFunc tells an operation to abandon its work.
+// A CancelFunc does not wait for the work to stop.
+// After the first call, subsequent calls to a CancelFunc do nothing.
+type CancelFunc func()
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
new file mode 100644
index 0000000..938feaf
--- /dev/null
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -0,0 +1,371 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package rate provides a rate limiter.
+package rate
+
+import (
+ "fmt"
+ "math"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Limit defines the maximum frequency of some events.
+// Limit is represented as a number of events per second.
+// A zero Limit allows no events.
+type Limit float64
+
+// Inf is the infinite rate limit; it allows all events (even if burst is zero).
+const Inf = Limit(math.MaxFloat64)
+
+// Every converts a minimum time interval between events to a Limit.
+func Every(interval time.Duration) Limit {
+ if interval <= 0 {
+ return Inf
+ }
+ return 1 / Limit(interval.Seconds())
+}
+
+// A Limiter controls how frequently events are allowed to happen.
+// It implements a "token bucket" of size b, initially full and refilled
+// at rate r tokens per second.
+// Informally, in any large enough time interval, the Limiter limits the
+// rate to r tokens per second, with a maximum burst size of b events.
+// As a special case, if r == Inf (the infinite rate), b is ignored.
+// See https://en.wikipedia.org/wiki/Token_bucket for more about token buckets. +// +// The zero value is a valid Limiter, but it will reject all events. +// Use NewLimiter to create non-zero Limiters. +// +// Limiter has three main methods, Allow, Reserve, and Wait. +// Most callers should use Wait. +// +// Each of the three methods consumes a single token. +// They differ in their behavior when no token is available. +// If no token is available, Allow returns false. +// If no token is available, Reserve returns a reservation for a future token +// and the amount of time the caller must wait before using it. +// If no token is available, Wait blocks until one can be obtained +// or its associated context.Context is canceled. +// +// The methods AllowN, ReserveN, and WaitN consume n tokens. +type Limiter struct { + limit Limit + burst int + + mu sync.Mutex + tokens float64 + // last is the last time the limiter's tokens field was updated + last time.Time + // lastEvent is the latest time of a rate-limited event (past or future) + lastEvent time.Time +} + +// Limit returns the maximum overall event rate. +func (lim *Limiter) Limit() Limit { + lim.mu.Lock() + defer lim.mu.Unlock() + return lim.limit +} + +// Burst returns the maximum burst size. Burst is the maximum number of tokens +// that can be consumed in a single call to Allow, Reserve, or Wait, so higher +// Burst values allow more events to happen at once. +// A zero Burst allows no events, unless limit == Inf. +func (lim *Limiter) Burst() int { + return lim.burst +} + +// NewLimiter returns a new Limiter that allows events up to rate r and permits +// bursts of at most b tokens. +func NewLimiter(r Limit, b int) *Limiter { + return &Limiter{ + limit: r, + burst: b, + } +} + +// Allow is shorthand for AllowN(time.Now(), 1). +func (lim *Limiter) Allow() bool { + return lim.AllowN(time.Now(), 1) +} + +// AllowN reports whether n events may happen at time now. +// Use this method if you intend to drop / skip events that exceed the rate limit. +// Otherwise use Reserve or Wait. +func (lim *Limiter) AllowN(now time.Time, n int) bool { + return lim.reserveN(now, n, 0).ok +} + +// A Reservation holds information about events that are permitted by a Limiter to happen after a delay. +// A Reservation may be canceled, which may enable the Limiter to permit additional events. +type Reservation struct { + ok bool + lim *Limiter + tokens int + timeToAct time.Time + // This is the Limit at reservation time, it can change later. + limit Limit +} + +// OK returns whether the limiter can provide the requested number of tokens +// within the maximum wait time. If OK is false, Delay returns InfDuration, and +// Cancel does nothing. +func (r *Reservation) OK() bool { + return r.ok +} + +// Delay is shorthand for DelayFrom(time.Now()). +func (r *Reservation) Delay() time.Duration { + return r.DelayFrom(time.Now()) +} + +// InfDuration is the duration returned by Delay when a Reservation is not OK. +const InfDuration = time.Duration(1<<63 - 1) + +// DelayFrom returns the duration for which the reservation holder must wait +// before taking the reserved action. Zero duration means act immediately. +// InfDuration means the limiter cannot grant the tokens requested in this +// Reservation within the maximum wait time. +func (r *Reservation) DelayFrom(now time.Time) time.Duration { + if !r.ok { + return InfDuration + } + delay := r.timeToAct.Sub(now) + if delay < 0 { + return 0 + } + return delay +} + +// Cancel is shorthand for CancelAt(time.Now()). 
+func (r *Reservation) Cancel() { + r.CancelAt(time.Now()) + return +} + +// CancelAt indicates that the reservation holder will not perform the reserved action +// and reverses the effects of this Reservation on the rate limit as much as possible, +// considering that other reservations may have already been made. +func (r *Reservation) CancelAt(now time.Time) { + if !r.ok { + return + } + + r.lim.mu.Lock() + defer r.lim.mu.Unlock() + + if r.lim.limit == Inf || r.tokens == 0 || r.timeToAct.Before(now) { + return + } + + // calculate tokens to restore + // The duration between lim.lastEvent and r.timeToAct tells us how many tokens were reserved + // after r was obtained. These tokens should not be restored. + restoreTokens := float64(r.tokens) - r.limit.tokensFromDuration(r.lim.lastEvent.Sub(r.timeToAct)) + if restoreTokens <= 0 { + return + } + // advance time to now + now, _, tokens := r.lim.advance(now) + // calculate new number of tokens + tokens += restoreTokens + if burst := float64(r.lim.burst); tokens > burst { + tokens = burst + } + // update state + r.lim.last = now + r.lim.tokens = tokens + if r.timeToAct == r.lim.lastEvent { + prevEvent := r.timeToAct.Add(r.limit.durationFromTokens(float64(-r.tokens))) + if !prevEvent.Before(now) { + r.lim.lastEvent = prevEvent + } + } + + return +} + +// Reserve is shorthand for ReserveN(time.Now(), 1). +func (lim *Limiter) Reserve() *Reservation { + return lim.ReserveN(time.Now(), 1) +} + +// ReserveN returns a Reservation that indicates how long the caller must wait before n events happen. +// The Limiter takes this Reservation into account when allowing future events. +// ReserveN returns false if n exceeds the Limiter's burst size. +// Usage example: +// r := lim.ReserveN(time.Now(), 1) +// if !r.OK() { +// // Not allowed to act! Did you remember to set lim.burst to be > 0 ? +// return +// } +// time.Sleep(r.Delay()) +// Act() +// Use this method if you wish to wait and slow down in accordance with the rate limit without dropping events. +// If you need to respect a deadline or cancel the delay, use Wait instead. +// To drop or skip events exceeding rate limit, use Allow instead. +func (lim *Limiter) ReserveN(now time.Time, n int) *Reservation { + r := lim.reserveN(now, n, InfDuration) + return &r +} + +// Wait is shorthand for WaitN(ctx, 1). +func (lim *Limiter) Wait(ctx context.Context) (err error) { + return lim.WaitN(ctx, 1) +} + +// WaitN blocks until lim permits n events to happen. +// It returns an error if n exceeds the Limiter's burst size, the Context is +// canceled, or the expected wait time exceeds the Context's Deadline. +// The burst limit is ignored if the rate limit is Inf. +func (lim *Limiter) WaitN(ctx context.Context, n int) (err error) { + if n > lim.burst && lim.limit != Inf { + return fmt.Errorf("rate: Wait(n=%d) exceeds limiter's burst %d", n, lim.burst) + } + // Check if ctx is already cancelled + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + // Determine wait limit + now := time.Now() + waitLimit := InfDuration + if deadline, ok := ctx.Deadline(); ok { + waitLimit = deadline.Sub(now) + } + // Reserve + r := lim.reserveN(now, n, waitLimit) + if !r.ok { + return fmt.Errorf("rate: Wait(n=%d) would exceed context deadline", n) + } + // Wait + t := time.NewTimer(r.DelayFrom(now)) + defer t.Stop() + select { + case <-t.C: + // We can proceed. + return nil + case <-ctx.Done(): + // Context was canceled before we could proceed. 
Cancel the + // reservation, which may permit other events to proceed sooner. + r.Cancel() + return ctx.Err() + } +} + +// SetLimit is shorthand for SetLimitAt(time.Now(), newLimit). +func (lim *Limiter) SetLimit(newLimit Limit) { + lim.SetLimitAt(time.Now(), newLimit) +} + +// SetLimitAt sets a new Limit for the limiter. The new Limit, and Burst, may be violated +// or underutilized by those which reserved (using Reserve or Wait) but did not yet act +// before SetLimitAt was called. +func (lim *Limiter) SetLimitAt(now time.Time, newLimit Limit) { + lim.mu.Lock() + defer lim.mu.Unlock() + + now, _, tokens := lim.advance(now) + + lim.last = now + lim.tokens = tokens + lim.limit = newLimit +} + +// reserveN is a helper method for AllowN, ReserveN, and WaitN. +// maxFutureReserve specifies the maximum reservation wait duration allowed. +// reserveN returns Reservation, not *Reservation, to avoid allocation in AllowN and WaitN. +func (lim *Limiter) reserveN(now time.Time, n int, maxFutureReserve time.Duration) Reservation { + lim.mu.Lock() + + if lim.limit == Inf { + lim.mu.Unlock() + return Reservation{ + ok: true, + lim: lim, + tokens: n, + timeToAct: now, + } + } + + now, last, tokens := lim.advance(now) + + // Calculate the remaining number of tokens resulting from the request. + tokens -= float64(n) + + // Calculate the wait duration + var waitDuration time.Duration + if tokens < 0 { + waitDuration = lim.limit.durationFromTokens(-tokens) + } + + // Decide result + ok := n <= lim.burst && waitDuration <= maxFutureReserve + + // Prepare reservation + r := Reservation{ + ok: ok, + lim: lim, + limit: lim.limit, + } + if ok { + r.tokens = n + r.timeToAct = now.Add(waitDuration) + } + + // Update state + if ok { + lim.last = now + lim.tokens = tokens + lim.lastEvent = r.timeToAct + } else { + lim.last = last + } + + lim.mu.Unlock() + return r +} + +// advance calculates and returns an updated state for lim resulting from the passage of time. +// lim is not changed. +func (lim *Limiter) advance(now time.Time) (newNow time.Time, newLast time.Time, newTokens float64) { + last := lim.last + if now.Before(last) { + last = now + } + + // Avoid making delta overflow below when last is very old. + maxElapsed := lim.limit.durationFromTokens(float64(lim.burst) - lim.tokens) + elapsed := now.Sub(last) + if elapsed > maxElapsed { + elapsed = maxElapsed + } + + // Calculate the new number of tokens, due to time that passed. + delta := lim.limit.tokensFromDuration(elapsed) + tokens := lim.tokens + delta + if burst := float64(lim.burst); tokens > burst { + tokens = burst + } + + return now, last, tokens +} + +// durationFromTokens is a unit conversion function from the number of tokens to the duration +// of time it takes to accumulate them at a rate of limit tokens per second. +func (limit Limit) durationFromTokens(tokens float64) time.Duration { + seconds := tokens / float64(limit) + return time.Nanosecond * time.Duration(1e9*seconds) +} + +// tokensFromDuration is a unit conversion function from a time duration to the number of tokens +// which could be accumulated during that duration at a rate of limit tokens per second. +func (limit Limit) tokensFromDuration(d time.Duration) float64 { + return d.Seconds() * float64(limit) +}
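
Not part of the vendored files above, but for orientation: the bot vendors golang.org/x/net/context and golang.org/x/time/rate together so that rate-limited work can also be bounded by a deadline. A minimal sketch of that pattern, using only the APIs shown in this patch (the 2-per-second limit, burst of 4, and one-second deadline are illustrative values, not anything this commit configures):

    package main

    import (
    	"fmt"
    	"time"

    	"golang.org/x/net/context"
    	"golang.org/x/time/rate"
    )

    func main() {
    	// Token bucket: refills at 2 tokens per second, holds at most 4.
    	lim := rate.NewLimiter(rate.Limit(2), 4)

    	// Bound the whole batch with a single deadline.
    	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
    	defer cancel()

    	for i := 0; i < 10; i++ {
    		// Wait blocks until a token is available, or fails early if
    		// the expected wait would exceed the context's deadline.
    		if err := lim.Wait(ctx); err != nil {
    			fmt.Println("stopped:", err)
    			return
    		}
    		fmt.Println("event", i)
    	}
    }

The first four iterations drain the initial burst immediately; after that, Wait paces the loop at the configured rate until the remaining deadline can no longer cover the next wait, at which point it returns an error instead of sleeping.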