initial commit

Cadey Ratio 2017-08-25 01:22:58 -07:00
commit 931404890e
No known key found for this signature in database
GPG Key ID: D607EE27C2E7F89A
317 changed files with 63658 additions and 0 deletions

2
.gitignore vendored Normal file

@@ -0,0 +1,2 @@
.env
vyvanse

198
bot/bot.go Normal file

@@ -0,0 +1,198 @@
package bot

import (
	"context"
	"errors"
	"log"
	"strings"
	"sync"
	"time"

	"github.com/bwmarrin/discordgo"
	opentracing "github.com/opentracing/opentracing-go"
	otlog "github.com/opentracing/opentracing-go/log"
	"golang.org/x/time/rate"
)

var (
	ErrRateLimitExceeded = errors.New("bot: per-command rate limit exceeded")
)

type command struct {
	aliases  []string
	verb     string
	helptext string
}

func (c *command) Verb() string {
	return c.verb
}

func (c *command) Helptext() string {
	return c.helptext
}

// Handler is the type that bot command functions need to implement. Errors
// should be returned.
type Handler func(context.Context, *discordgo.Session, *discordgo.Message, []string) error

// CommandHandler is a generic interface for types that implement a bot
// command. It is akin to http.Handler, but more comprehensive.
type CommandHandler interface {
	Verb() string
	Helptext() string
	Handler(context.Context, *discordgo.Session, *discordgo.Message, []string) error
	Permissions(context.Context, *discordgo.Session, *discordgo.Message, []string) error
}

type basicCommand struct {
	*command
	handler     Handler
	permissions Handler
	limiter     *rate.Limiter
}

func (bc *basicCommand) Handler(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	return bc.handler(ctx, s, m, parv)
}

func (bc *basicCommand) Permissions(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	if !bc.limiter.Allow() {
		return ErrRateLimitExceeded
	}

	return bc.permissions(ctx, s, m, parv)
}

// The "default" command set, useful for simple bot projects.
var (
	DefaultCommandSet = NewCommandSet()
)

// Command handling errors.
var (
	ErrAlreadyExists     = errors.New("bot: command already exists")
	ErrNoSuchCommand     = errors.New("bot: no such command exists")
	ErrNoPermissions     = errors.New("bot: you do not have permissions for this command")
	ErrParvCountMismatch = errors.New("bot: parameter count mismatch")
)

// The default command prefix. Command `foo` becomes `.foo` in chat, etc.
const (
	DefaultPrefix = "."
)

// NewCommand creates an anonymous command and adds it to the default CommandSet.
func NewCommand(verb, helptext string, permissions, handler Handler) error {
	return DefaultCommandSet.Add(NewBasicCommand(verb, helptext, permissions, handler))
}

// NewBasicCommand creates a CommandHandler instance using the implementation
// functions supplied as arguments.
func NewBasicCommand(verb, helptext string, permissions, handler Handler) CommandHandler {
	return &basicCommand{
		command: &command{
			verb:     verb,
			helptext: helptext,
		},
		handler:     handler,
		permissions: permissions,
		limiter:     rate.NewLimiter(rate.Every(5*time.Second), 1),
	}
}

// CommandSet is a group of bot commands similar to an http.ServeMux.
type CommandSet struct {
	sync.Mutex
	cmds map[string]CommandHandler

	Prefix string
}

// NewCommandSet creates a new command set with the `help` command pre-loaded.
func NewCommandSet() *CommandSet {
	cs := &CommandSet{
		cmds:   map[string]CommandHandler{},
		Prefix: DefaultPrefix,
	}

	cs.AddCmd("help", "Shows help for the bot", NoPermissions, cs.help)

	return cs
}

// NoPermissions is a simple middleware function that allows all command invocations
// to pass the permissions check.
func NoPermissions(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	return nil
}

// AddCmd is syntactic sugar for cs.Add(NewBasicCommand(args...))
func (cs *CommandSet) AddCmd(verb, helptext string, permissions, handler Handler) error {
	return cs.Add(NewBasicCommand(verb, helptext, permissions, handler))
}

// Add adds a single command handler to the CommandSet. This can be done at runtime,
// but it is suggested to only add commands on application boot.
func (cs *CommandSet) Add(h CommandHandler) error {
	cs.Lock()
	defer cs.Unlock()

	v := strings.ToLower(h.Verb())
	if _, ok := cs.cmds[v]; ok {
		return ErrAlreadyExists
	}

	cs.cmds[v] = h

	return nil
}

// Run makes a CommandSet compatible with discordgo event dispatching.
func (cs *CommandSet) Run(s *discordgo.Session, msg *discordgo.Message) error {
	cs.Lock()
	defer cs.Unlock()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	sp, ctx := opentracing.StartSpanFromContext(ctx, "CommandSet.Run")
	defer sp.Finish()

	if strings.HasPrefix(msg.Content, cs.Prefix) {
		params := strings.Fields(msg.Content)
		// Strip the whole prefix rather than a single character so that
		// multi-character prefixes work too.
		verb := strings.ToLower(strings.TrimPrefix(params[0], cs.Prefix))

		sp.LogFields(
			otlog.String("message-id", msg.ID),
			otlog.String("author", msg.Author.ID),
			otlog.String("channel-id", msg.ChannelID),
			otlog.String("verb", verb),
			otlog.Int("parc", len(params)),
		)

		cmd, ok := cs.cmds[verb]
		if !ok {
			return ErrNoSuchCommand
		}

		err := cmd.Permissions(ctx, s, msg, params)
		if err != nil {
			sp.LogFields(otlog.Error(err))
			log.Printf("Permissions error: %s: %v", msg.Author.Username, err)
			s.ChannelMessageSend(msg.ChannelID, "You don't have permissions for that, sorry.")
			return ErrNoPermissions
		}

		err = cmd.Handler(ctx, s, msg, params)
		if err != nil {
			log.Printf("command handler error: %v", err)
			s.ChannelMessageSend(msg.ChannelID, "error when running that command: "+err.Error())
			return err
		}
	}

	return nil
}
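
To illustrate the API above, here is a minimal sketch (not part of this commit) of defining a handler and registering it with AddCmd; the `ping` verb and its reply are invented for the example, and the import path is taken from box.rb below:

package main

import (
	"context"

	"git.xeserv.us/xena/vyvanse/bot"
	"github.com/bwmarrin/discordgo"
)

// ping is a hypothetical handler matching the bot.Handler signature.
func ping(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	_, err := s.ChannelMessageSend(m.ChannelID, "pong")
	return err
}

func main() {
	cs := bot.NewCommandSet()

	// AddCmd takes (verb, helptext, permissions, handler); NewBasicCommand
	// also attaches a one-invocation-per-five-seconds rate limit.
	if err := cs.AddCmd("ping", "replies with pong", bot.NoPermissions, ping); err != nil {
		panic(err)
	}
}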

10
bot/doc.go Normal file

@@ -0,0 +1,10 @@
/*
Package bot contains some generically useful bot rigging for Discord chatbots.
This package works by defining command handlers in a CommandSet, and then dispatching
based on message contents. If the bot's command prefix is `;`, then `;foo` activates
the command handler for command `foo`.
A CommandSet has a mutex baked into it for convenience of command implementation.
*/
package bot
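
Continuing the sketch above, the dispatch this documentation describes can be hooked into discordgo's event loop. This fragment assumes the `cs` CommandSet from the earlier example, `os` and `log` imports, and a bot token in an environment variable (the variable name is an assumption):

dg, err := discordgo.New("Bot " + os.Getenv("DISCORD_TOKEN")) // token source is an assumption
if err != nil {
	log.Fatal(err)
}

dg.AddHandler(func(s *discordgo.Session, m *discordgo.MessageCreate) {
	// MessageCreate embeds *discordgo.Message, which Run expects.
	if err := cs.Run(s, m.Message); err != nil {
		log.Printf("command error: %v", err)
	}
})

if err := dg.Open(); err != nil {
	log.Fatal(err)
}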

29
bot/help.go Normal file

@@ -0,0 +1,29 @@
package bot

import (
	"context"
	"fmt"

	"github.com/bwmarrin/discordgo"
)

func (cs *CommandSet) help(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	switch len(parv) {
	case 1:
		// print help for all commands
		result := "Bot commands: \n"

		for verb, cmd := range cs.cmds {
			result += fmt.Sprintf("%s%s: %s\n", cs.Prefix, verb, cmd.Helptext())
		}

		result += "If there are any problems, please don't hesitate to ask a server admin for help."

		s.ChannelMessageSend(m.ChannelID, result)
	default:
		return ErrParvCountMismatch
	}

	return nil
}

32
box.rb Normal file

@@ -0,0 +1,32 @@
$repo = "git.xeserv.us/xena/vyvanse"
$gover = "1.8.3"

from "xena/go-mini:#{$gover}"

run "go#{$gover} download"

def foldercopy(dir)
  copy "#{dir}", "/root/go/src/#{$repo}/#{dir}"
end

def gobuild(pkg)
  run "go#{$gover} build #{$repo}/#{pkg} && go#{$gover} install #{$repo}/#{pkg}"
end

folders = [
  "bot",
  "cmd",
  "vendor",
  "vendor-log"
]

folders.each { |x| foldercopy x }

gobuild "cmd/vyvanse"

cmd "/root/go/bin/vyvanse"

run "rm -rf $HOME/sdk /root/go/pkg"
run "apk del go#{$gover}"

flatten
tag "xena/vyvanse"

7
config.nims Normal file

@@ -0,0 +1,7 @@
task up, "starts docker-compose services":
  exec "box box.rb"
  exec "docker-compose up -d"

task down, "stops docker-compose services":
  exec "docker-compose down"

15
docker-compose.yml Normal file

@@ -0,0 +1,15 @@
version: "3"

services:
  zipkin:
    image: openzipkin/zipkin
    environment:
      STORAGE_TYPE: mem
    ports:
      - "9411:9411"

  vyvanse:
    image: xena/vyvanse
    env_file: ./.env
    depends_on:
      - zipkin

41
vendor-log Normal file

@@ -0,0 +1,41 @@
55b3f6a77f08038b1e82c3db3c1ce619e46de9b6 github.com/Shopify/sarama
905d0b5876706403fc746bc18dee25d2fecc2518 github.com/Xe/ln
ec64f23d236d7874e3b28ae86c833f57c7aa3389 github.com/apache/thrift/lib/go/thrift
d420e28024ad527390b43aa7f64e029083e11989 github.com/bwmarrin/discordgo
adab96458c51a58dc1783b3335dcce5461522e75 github.com/davecgh/go-spew/spew
b1fe83b5b03f624450823b751b662259ffc6af70 github.com/eapache/go-resiliency/breaker
bb955e01b9346ac19dc29eb16586c90ded99a98c github.com/eapache/go-xerial-snappy
44cc805cf13205b55f69e14bcb69867d1ae92f98 github.com/eapache/queue
390ab7935ee28ec6b286364bba9b4dd6410cb3d5 github.com/go-logfmt/logfmt
fcdc5011193ff531a548e9b0301828d5a5b97fd8 github.com/gogo/protobuf/proto
553a641470496b2327abcac10b36396bd98e45c9 github.com/golang/snappy
806455e841dcb0614891ad847500aaa6b10d3d1d github.com/google/gops/agent
806455e841dcb0614891ad847500aaa6b10d3d1d github.com/google/gops/internal
806455e841dcb0614891ad847500aaa6b10d3d1d github.com/google/gops/signal
a69d9f6de432e2c6b296a947d8a5ee88f68522cf github.com/gorilla/websocket
9d9ddadf44b4c17c42bafdc530ddeee1927c067d github.com/joho/godotenv
9d9ddadf44b4c17c42bafdc530ddeee1927c067d github.com/joho/godotenv/autoload
67f268f20922975c067ed799e4be6bacf152208c github.com/namsral/flag
9497d909de390b9a8dd0d88975cf0ce7d0b4d688 github.com/nishanths/go-xkcd
a52f2342449246d5bcc273e65cbdcfa5f7d6c63c github.com/opentracing-contrib/go-observer
8ebe5d4e236eed9fd88e593c288bfb804d630b8c github.com/opentracing/opentracing-go
8ebe5d4e236eed9fd88e593c288bfb804d630b8c github.com/opentracing/opentracing-go/ext
8ebe5d4e236eed9fd88e593c288bfb804d630b8c github.com/opentracing/opentracing-go/log
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/flag
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/types
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/wire
5a3d2245f97fc249850e7802e3c01fad02a1c316 github.com/pierrec/lz4
a0006b13c722f7f12368c00a3d3c2ae8a999a0c6 github.com/pierrec/xxHash/xxHash32
c605e284fe17294bda444b34710735b29d1a9d90 github.com/pkg/errors
1f30fe9094a513ce4c700b9a54458bbb0c96996c github.com/rcrowley/go-metrics
736158dc09e10f1911ca3a1e1b01f11b566ce5db github.com/robfig/cron
42ff06aea7c329876e5a0fe94acc96902accf0ad golang.org/x/crypto/nacl/secretbox
42ff06aea7c329876e5a0fe94acc96902accf0ad golang.org/x/crypto/poly1305
42ff06aea7c329876e5a0fe94acc96902accf0ad golang.org/x/crypto/salsa20/salsa
f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f golang.org/x/net/context
f51c12702a4d776e4c1fa9b0fabab841babae631 golang.org/x/time/rate
ae77be60afb1dcacde03767a8c37337fad28ac14 github.com/kardianos/osext
6a18b51d929caddbe795e1609195dee1d1cc729e github.com/justinian/dice

24
vendor/github.com/Shopify/sarama/api_versions_request.go generated vendored Normal file

@@ -0,0 +1,24 @@
package sarama
type ApiVersionsRequest struct {
}
func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
return nil
}
func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (r *ApiVersionsRequest) key() int16 {
return 18
}
func (r *ApiVersionsRequest) version() int16 {
return 0
}
func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
return V0_10_0_0
}

87
vendor/github.com/Shopify/sarama/api_versions_response.go generated vendored Normal file

@@ -0,0 +1,87 @@
package sarama
type ApiVersionsResponseBlock struct {
ApiKey int16
MinVersion int16
MaxVersion int16
}
func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
pe.putInt16(b.ApiKey)
pe.putInt16(b.MinVersion)
pe.putInt16(b.MaxVersion)
return nil
}
func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
var err error
if b.ApiKey, err = pd.getInt16(); err != nil {
return err
}
if b.MinVersion, err = pd.getInt16(); err != nil {
return err
}
if b.MaxVersion, err = pd.getInt16(); err != nil {
return err
}
return nil
}
type ApiVersionsResponse struct {
Err KError
ApiVersions []*ApiVersionsResponseBlock
}
func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
return err
}
for _, apiVersion := range r.ApiVersions {
if err := apiVersion.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
for i := 0; i < numBlocks; i++ {
block := new(ApiVersionsResponseBlock)
if err := block.decode(pd); err != nil {
return err
}
r.ApiVersions[i] = block
}
return nil
}
func (r *ApiVersionsResponse) key() int16 {
return 18
}
func (r *ApiVersionsResponse) version() int16 {
return 0
}
func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
return V0_10_0_0
}

904
vendor/github.com/Shopify/sarama/async_producer.go generated vendored Normal file

@@ -0,0 +1,904 @@
package sarama
import (
"fmt"
"sync"
"time"
"github.com/eapache/go-resiliency/breaker"
"github.com/eapache/queue"
)
// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {
// AsyncClose triggers a shutdown of the producer. The shutdown has completed
// when both the Errors and Successes channels have been closed. When calling
// AsyncClose, you *must* continue to read from those channels in order to
// drain the results of any messages in flight.
AsyncClose()
// Close shuts down the producer and waits for any buffered messages to be
// flushed. You must call this function before a producer object passes out of
// scope, as it may otherwise leak memory. You must call this before calling
// Close on the underlying client.
Close() error
// Input is the input channel for the user to write messages to that they
// wish to send.
Input() chan<- *ProducerMessage
// Successes is the success output channel back to the user when Return.Successes is
// enabled. If Return.Successes is true, you MUST read from this channel or the
// Producer will deadlock. It is suggested that you send and read messages
// together in a single select statement.
Successes() <-chan *ProducerMessage
// Errors is the error output channel back to the user. You MUST read from this
// channel or the Producer will deadlock when the channel is full. Alternatively,
// you can set Producer.Return.Errors in your config to false, which prevents
// errors from being returned.
Errors() <-chan *ProducerError
}
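// To make the contract above concrete, a minimal usage sketch follows (an
// illustration, not part of the original file; the broker address and topic
// are placeholders):
//
//	producer, err := NewAsyncProducer([]string{"localhost:9092"}, NewConfig())
//	if err != nil {
//		panic(err)
//	}
//	defer producer.AsyncClose()
//
//	// Required: drain Errors() (or set Producer.Return.Errors to false).
//	go func() {
//		for err := range producer.Errors() {
//			Logger.Println(err)
//		}
//	}()
//
//	producer.Input() <- &ProducerMessage{
//		Topic: "example-topic",
//		Value: StringEncoder("hello"),
//	}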
type asyncProducer struct {
client Client
conf *Config
ownClient bool
errors chan *ProducerError
input, successes, retries chan *ProducerMessage
inFlight sync.WaitGroup
brokers map[*Broker]chan<- *ProducerMessage
brokerRefs map[chan<- *ProducerMessage]int
brokerLock sync.Mutex
}
// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
client, err := NewClient(addrs, conf)
if err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
p.(*asyncProducer).ownClient = true
return p, nil
}
// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
p := &asyncProducer{
client: client,
conf: client.Config(),
errors: make(chan *ProducerError),
input: make(chan *ProducerMessage),
successes: make(chan *ProducerMessage),
retries: make(chan *ProducerMessage),
brokers: make(map[*Broker]chan<- *ProducerMessage),
brokerRefs: make(map[chan<- *ProducerMessage]int),
}
// launch our singleton dispatchers
go withRecover(p.dispatcher)
go withRecover(p.retryHandler)
return p, nil
}
type flagSet int8
const (
syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
fin // final message from partitionProducer to brokerProducer and back
shutdown // start the shutdown process
)
// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
type ProducerMessage struct {
Topic string // The Kafka topic for this message.
// The partitioning key for this message. Pre-existing Encoders include
// StringEncoder and ByteEncoder.
Key Encoder
// The actual message to store in Kafka. Pre-existing Encoders include
// StringEncoder and ByteEncoder.
Value Encoder
// This field is used to hold arbitrary data you wish to include so it
// will be available when receiving on the Successes and Errors channels.
// Sarama completely ignores this field; it is only to be used for
// pass-through data.
Metadata interface{}
// Below this point are filled in by the producer as the message is processed
// Offset is the offset of the message stored on the broker. This is only
// guaranteed to be defined if the message was successfully delivered and
// RequiredAcks is not NoResponse.
Offset int64
// Partition is the partition that the message was sent to. This is only
// guaranteed to be defined if the message was successfully delivered.
Partition int32
// Timestamp is the timestamp assigned to the message by the broker. This
// is only guaranteed to be defined if the message was successfully
// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
// least version 0.10.0.
Timestamp time.Time
retries int
flags flagSet
}
const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
func (m *ProducerMessage) byteSize() int {
size := producerMessageOverhead
if m.Key != nil {
size += m.Key.Length()
}
if m.Value != nil {
size += m.Value.Length()
}
return size
}
func (m *ProducerMessage) clear() {
m.flags = 0
m.retries = 0
}
// ProducerError is the type of error generated when the producer fails to deliver a message.
// It contains the original ProducerMessage as well as the actual error value.
type ProducerError struct {
Msg *ProducerMessage
Err error
}
func (pe ProducerError) Error() string {
return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
}
// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
// when closing a producer.
type ProducerErrors []*ProducerError
func (pe ProducerErrors) Error() string {
return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
}
func (p *asyncProducer) Errors() <-chan *ProducerError {
return p.errors
}
func (p *asyncProducer) Successes() <-chan *ProducerMessage {
return p.successes
}
func (p *asyncProducer) Input() chan<- *ProducerMessage {
return p.input
}
func (p *asyncProducer) Close() error {
p.AsyncClose()
if p.conf.Producer.Return.Successes {
go withRecover(func() {
for range p.successes {
}
})
}
var errors ProducerErrors
if p.conf.Producer.Return.Errors {
for event := range p.errors {
errors = append(errors, event)
}
} else {
<-p.errors
}
if len(errors) > 0 {
return errors
}
return nil
}
func (p *asyncProducer) AsyncClose() {
go withRecover(p.shutdown)
}
// singleton
// dispatches messages by topic
func (p *asyncProducer) dispatcher() {
handlers := make(map[string]chan<- *ProducerMessage)
shuttingDown := false
for msg := range p.input {
if msg == nil {
Logger.Println("Something tried to send a nil message, it was ignored.")
continue
}
if msg.flags&shutdown != 0 {
shuttingDown = true
p.inFlight.Done()
continue
} else if msg.retries == 0 {
if shuttingDown {
// we can't just call returnError here because that decrements the wait group,
// which hasn't been incremented yet for this message, and shouldn't be
pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
continue
}
p.inFlight.Add(1)
}
if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
p.returnError(msg, ErrMessageSizeTooLarge)
continue
}
handler := handlers[msg.Topic]
if handler == nil {
handler = p.newTopicProducer(msg.Topic)
handlers[msg.Topic] = handler
}
handler <- msg
}
for _, handler := range handlers {
close(handler)
}
}
// one per topic
// partitions messages, then dispatches them by partition
type topicProducer struct {
parent *asyncProducer
topic string
input <-chan *ProducerMessage
breaker *breaker.Breaker
handlers map[int32]chan<- *ProducerMessage
partitioner Partitioner
}
func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
tp := &topicProducer{
parent: p,
topic: topic,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
handlers: make(map[int32]chan<- *ProducerMessage),
partitioner: p.conf.Producer.Partitioner(topic),
}
go withRecover(tp.dispatch)
return input
}
func (tp *topicProducer) dispatch() {
for msg := range tp.input {
if msg.retries == 0 {
if err := tp.partitionMessage(msg); err != nil {
tp.parent.returnError(msg, err)
continue
}
}
handler := tp.handlers[msg.Partition]
if handler == nil {
handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
tp.handlers[msg.Partition] = handler
}
handler <- msg
}
for _, handler := range tp.handlers {
close(handler)
}
}
func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
var partitions []int32
err := tp.breaker.Run(func() (err error) {
if tp.partitioner.RequiresConsistency() {
partitions, err = tp.parent.client.Partitions(msg.Topic)
} else {
partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
}
return
})
if err != nil {
return err
}
numPartitions := int32(len(partitions))
if numPartitions == 0 {
return ErrLeaderNotAvailable
}
choice, err := tp.partitioner.Partition(msg, numPartitions)
if err != nil {
return err
} else if choice < 0 || choice >= numPartitions {
return ErrInvalidPartition
}
msg.Partition = partitions[choice]
return nil
}
// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
type partitionProducer struct {
parent *asyncProducer
topic string
partition int32
input <-chan *ProducerMessage
leader *Broker
breaker *breaker.Breaker
output chan<- *ProducerMessage
// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
// therefore whether our buffer is complete and safe to flush)
highWatermark int
retryState []partitionRetryState
}
type partitionRetryState struct {
buf []*ProducerMessage
expectChaser bool
}
func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
pp := &partitionProducer{
parent: p,
topic: topic,
partition: partition,
input: input,
breaker: breaker.New(3, 1, 10*time.Second),
retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
}
go withRecover(pp.dispatch)
return input
}
func (pp *partitionProducer) dispatch() {
// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
// on the first message
pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
if pp.leader != nil {
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
}
for msg := range pp.input {
if msg.retries > pp.highWatermark {
// a new, higher, retry level; handle it and then back off
pp.newHighWatermark(msg.retries)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
} else if pp.highWatermark > 0 {
// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
if msg.retries < pp.highWatermark {
// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
if msg.flags&fin == fin {
pp.retryState[msg.retries].expectChaser = false
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
} else {
pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
}
continue
} else if msg.flags&fin == fin {
// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
// meaning this retry level is done and we can go down (at least) one level and flush that
pp.retryState[pp.highWatermark].expectChaser = false
pp.flushRetryBuffers()
pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
continue
}
}
// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
// without breaking any of our ordering guarantees
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnError(msg, err)
time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
continue
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
pp.output <- msg
}
if pp.output != nil {
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
}
}
func (pp *partitionProducer) newHighWatermark(hwm int) {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
pp.highWatermark = hwm
// send off a fin so that we know when everything "in between" has made it
// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
pp.retryState[pp.highWatermark].expectChaser = true
pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
// a new HWM means that our current broker selection is out of date
Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
pp.parent.unrefBrokerProducer(pp.leader, pp.output)
pp.output = nil
}
func (pp *partitionProducer) flushRetryBuffers() {
Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
for {
pp.highWatermark--
if pp.output == nil {
if err := pp.updateLeader(); err != nil {
pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
goto flushDone
}
Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
}
for _, msg := range pp.retryState[pp.highWatermark].buf {
pp.output <- msg
}
flushDone:
pp.retryState[pp.highWatermark].buf = nil
if pp.retryState[pp.highWatermark].expectChaser {
Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
break
} else if pp.highWatermark == 0 {
Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
break
}
}
}
func (pp *partitionProducer) updateLeader() error {
return pp.breaker.Run(func() (err error) {
if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
return err
}
if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
return err
}
pp.output = pp.parent.getBrokerProducer(pp.leader)
pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
return nil
})
}
// one per broker; also constructs an associated flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
var (
input = make(chan *ProducerMessage)
bridge = make(chan *produceSet)
responses = make(chan *brokerProducerResponse)
)
bp := &brokerProducer{
parent: p,
broker: broker,
input: input,
output: bridge,
responses: responses,
buffer: newProduceSet(p),
currentRetries: make(map[string]map[int32]error),
}
go withRecover(bp.run)
// minimal bridge to make the network response `select`able
go withRecover(func() {
for set := range bridge {
request := set.buildRequest()
response, err := broker.Produce(request)
responses <- &brokerProducerResponse{
set: set,
err: err,
res: response,
}
}
close(responses)
})
return input
}
type brokerProducerResponse struct {
set *produceSet
err error
res *ProduceResponse
}
// groups messages together into appropriately-sized batches for sending to the broker
// handles state related to retries etc
type brokerProducer struct {
parent *asyncProducer
broker *Broker
input <-chan *ProducerMessage
output chan<- *produceSet
responses <-chan *brokerProducerResponse
buffer *produceSet
timer <-chan time.Time
timerFired bool
closing error
currentRetries map[string]map[int32]error
}
func (bp *brokerProducer) run() {
var output chan<- *produceSet
Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
for {
select {
case msg := <-bp.input:
if msg == nil {
bp.shutdown()
return
}
if msg.flags&syn == syn {
Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
bp.broker.ID(), msg.Topic, msg.Partition)
if bp.currentRetries[msg.Topic] == nil {
bp.currentRetries[msg.Topic] = make(map[int32]error)
}
bp.currentRetries[msg.Topic][msg.Partition] = nil
bp.parent.inFlight.Done()
continue
}
if reason := bp.needsRetry(msg); reason != nil {
bp.parent.retryMessage(msg, reason)
if bp.closing == nil && msg.flags&fin == fin {
// we were retrying this partition but we can start processing again
delete(bp.currentRetries[msg.Topic], msg.Partition)
Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
bp.broker.ID(), msg.Topic, msg.Partition)
}
continue
}
if bp.buffer.wouldOverflow(msg) {
if err := bp.waitForSpace(msg); err != nil {
bp.parent.retryMessage(msg, err)
continue
}
}
if err := bp.buffer.add(msg); err != nil {
bp.parent.returnError(msg, err)
continue
}
if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
}
case <-bp.timer:
bp.timerFired = true
case output <- bp.buffer:
bp.rollOver()
case response := <-bp.responses:
bp.handleResponse(response)
}
if bp.timerFired || bp.buffer.readyToFlush() {
output = bp.output
} else {
output = nil
}
}
}
func (bp *brokerProducer) shutdown() {
for !bp.buffer.empty() {
select {
case response := <-bp.responses:
bp.handleResponse(response)
case bp.output <- bp.buffer:
bp.rollOver()
}
}
close(bp.output)
for response := range bp.responses {
bp.handleResponse(response)
}
Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
}
func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
if bp.closing != nil {
return bp.closing
}
return bp.currentRetries[msg.Topic][msg.Partition]
}
func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
for {
select {
case response := <-bp.responses:
bp.handleResponse(response)
// handling a response can change our state, so re-check some things
if reason := bp.needsRetry(msg); reason != nil {
return reason
} else if !bp.buffer.wouldOverflow(msg) {
return nil
}
case bp.output <- bp.buffer:
bp.rollOver()
return nil
}
}
}
func (bp *brokerProducer) rollOver() {
bp.timer = nil
bp.timerFired = false
bp.buffer = newProduceSet(bp.parent)
}
func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
if response.err != nil {
bp.handleError(response.set, response.err)
} else {
bp.handleSuccess(response.set, response.res)
}
if bp.buffer.empty() {
bp.rollOver() // this can happen if the response invalidated our buffer
}
}
func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
// we iterate through the blocks in the request set, not the response, so that we notice
// if the response is missing a block completely
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
if response == nil {
// this only happens when RequiredAcks is NoResponse, so we have to assume success
bp.parent.returnSuccesses(msgs)
return
}
block := response.GetBlock(topic, partition)
if block == nil {
bp.parent.returnErrors(msgs, ErrIncompleteResponse)
return
}
switch block.Err {
// Success
case ErrNoError:
if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
for _, msg := range msgs {
msg.Timestamp = block.Timestamp
}
}
for i, msg := range msgs {
msg.Offset = block.Offset + int64(i)
}
bp.parent.returnSuccesses(msgs)
// Retriable errors
case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
bp.broker.ID(), topic, partition, block.Err)
bp.currentRetries[topic][partition] = block.Err
bp.parent.retryMessages(msgs, block.Err)
bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
// Other non-retriable errors
default:
bp.parent.returnErrors(msgs, block.Err)
}
})
}
func (bp *brokerProducer) handleError(sent *produceSet, err error) {
switch err.(type) {
case PacketEncodingError:
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.returnErrors(msgs, err)
})
default:
Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
bp.parent.abandonBrokerConnection(bp.broker)
_ = bp.broker.Close()
bp.closing = err
sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
})
bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
bp.parent.retryMessages(msgs, err)
})
bp.rollOver()
}
}
// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
var msg *ProducerMessage
buf := queue.New()
for {
if buf.Length() == 0 {
msg = <-p.retries
} else {
select {
case msg = <-p.retries:
case p.input <- buf.Peek().(*ProducerMessage):
buf.Remove()
continue
}
}
if msg == nil {
return
}
buf.Add(msg)
}
}
// utility functions
func (p *asyncProducer) shutdown() {
Logger.Println("Producer shutting down.")
p.inFlight.Add(1)
p.input <- &ProducerMessage{flags: shutdown}
p.inFlight.Wait()
if p.ownClient {
err := p.client.Close()
if err != nil {
Logger.Println("producer/shutdown failed to close the embedded client:", err)
}
}
close(p.input)
close(p.retries)
close(p.errors)
close(p.successes)
}
func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
msg.clear()
pErr := &ProducerError{Msg: msg, Err: err}
if p.conf.Producer.Return.Errors {
p.errors <- pErr
} else {
Logger.Println(pErr)
}
p.inFlight.Done()
}
func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
for _, msg := range batch {
p.returnError(msg, err)
}
}
func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
for _, msg := range batch {
if p.conf.Producer.Return.Successes {
msg.clear()
p.successes <- msg
}
p.inFlight.Done()
}
}
func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
if msg.retries >= p.conf.Producer.Retry.Max {
p.returnError(msg, err)
} else {
msg.retries++
p.retries <- msg
}
}
func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
for _, msg := range batch {
p.retryMessage(msg, err)
}
}
func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
bp := p.brokers[broker]
if bp == nil {
bp = p.newBrokerProducer(broker)
p.brokers[broker] = bp
p.brokerRefs[bp] = 0
}
p.brokerRefs[bp]++
return bp
}
func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
p.brokerRefs[bp]--
if p.brokerRefs[bp] == 0 {
close(bp)
delete(p.brokerRefs, bp)
if p.brokers[broker] == bp {
delete(p.brokers, broker)
}
}
}
func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
p.brokerLock.Lock()
defer p.brokerLock.Unlock()
delete(p.brokers, broker)
}

685
vendor/github.com/Shopify/sarama/broker.go generated vendored Normal file

@@ -0,0 +1,685 @@
package sarama
import (
"crypto/tls"
"encoding/binary"
"fmt"
"io"
"net"
"strconv"
"sync"
"sync/atomic"
"time"
"github.com/rcrowley/go-metrics"
)
// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
type Broker struct {
id int32
addr string
conf *Config
correlationID int32
conn net.Conn
connErr error
lock sync.Mutex
opened int32
responses chan responsePromise
done chan bool
incomingByteRate metrics.Meter
requestRate metrics.Meter
requestSize metrics.Histogram
requestLatency metrics.Histogram
outgoingByteRate metrics.Meter
responseRate metrics.Meter
responseSize metrics.Histogram
brokerIncomingByteRate metrics.Meter
brokerRequestRate metrics.Meter
brokerRequestSize metrics.Histogram
brokerRequestLatency metrics.Histogram
brokerOutgoingByteRate metrics.Meter
brokerResponseRate metrics.Meter
brokerResponseSize metrics.Histogram
}
type responsePromise struct {
requestTime time.Time
correlationID int32
packets chan []byte
errors chan error
}
// NewBroker creates and returns a Broker targeting the given host:port address.
// This does not attempt to actually connect; you have to call Open() for that.
func NewBroker(addr string) *Broker {
return &Broker{id: -1, addr: addr}
}
// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
// waiting for the connection to complete. This means that any subsequent operations on the broker will
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
func (b *Broker) Open(conf *Config) error {
if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
return ErrAlreadyConnected
}
if conf == nil {
conf = NewConfig()
}
err := conf.Validate()
if err != nil {
return err
}
b.lock.Lock()
go withRecover(func() {
defer b.lock.Unlock()
dialer := net.Dialer{
Timeout: conf.Net.DialTimeout,
KeepAlive: conf.Net.KeepAlive,
}
if conf.Net.TLS.Enable {
b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
} else {
b.conn, b.connErr = dialer.Dial("tcp", b.addr)
}
if b.connErr != nil {
Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
b.conn = nil
atomic.StoreInt32(&b.opened, 0)
return
}
b.conn = newBufConn(b.conn)
b.conf = conf
// Create or reuse the global metrics shared between brokers
b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
// Do not gather metrics for seeded broker (only used during bootstrap) because they share
// the same id (-1) and are already exposed through the global metrics above
if b.id >= 0 {
b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
}
if conf.Net.SASL.Enable {
b.connErr = b.sendAndReceiveSASLPlainAuth()
if b.connErr != nil {
err = b.conn.Close()
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
}
b.conn = nil
atomic.StoreInt32(&b.opened, 0)
return
}
}
b.done = make(chan bool)
b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
if b.id >= 0 {
Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
} else {
Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
}
go withRecover(b.responseReceiver)
})
return nil
}
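// Putting the doc comment above into practice, a fully synchronous open looks
// like this (an illustration, not part of the original file; the address is a
// placeholder):
//
//	b := NewBroker("localhost:9092")
//	if err := b.Open(nil); err != nil { // nil conf means NewConfig() is used
//		return err
//	}
//	if connected, err := b.Connected(); err != nil || !connected {
//		return err
//	}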
// Connected returns true if the broker is connected and false otherwise. If the broker is not
// connected but it had tried to connect, the error from that connection attempt is also returned.
func (b *Broker) Connected() (bool, error) {
b.lock.Lock()
defer b.lock.Unlock()
return b.conn != nil, b.connErr
}
func (b *Broker) Close() error {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
return ErrNotConnected
}
close(b.responses)
<-b.done
err := b.conn.Close()
b.conn = nil
b.connErr = nil
b.done = nil
b.responses = nil
if err == nil {
Logger.Printf("Closed connection to broker %s\n", b.addr)
} else {
Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
}
atomic.StoreInt32(&b.opened, 0)
return err
}
// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
func (b *Broker) ID() int32 {
return b.id
}
// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
func (b *Broker) Addr() string {
return b.addr
}
func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
response := new(MetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
response := new(ConsumerMetadataResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
response := new(OffsetResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
var response *ProduceResponse
var err error
if request.RequiredAcks == NoResponse {
err = b.sendAndReceive(request, nil)
} else {
response = new(ProduceResponse)
err = b.sendAndReceive(request, response)
}
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
response := new(FetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
response := new(OffsetCommitResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
response := new(OffsetFetchResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
response := new(JoinGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
response := new(SyncGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
response := new(LeaveGroupResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
response := new(HeartbeatResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
response := new(ListGroupsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
response := new(DescribeGroupsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) ApiVersions(request *ApiVersionsRequest) (*ApiVersionsResponse, error) {
response := new(ApiVersionsResponse)
err := b.sendAndReceive(request, response)
if err != nil {
return nil, err
}
return response, nil
}
func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
b.lock.Lock()
defer b.lock.Unlock()
if b.conn == nil {
if b.connErr != nil {
return nil, b.connErr
}
return nil, ErrNotConnected
}
if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
return nil, ErrUnsupportedVersion
}
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
buf, err := encode(req, b.conf.MetricRegistry)
if err != nil {
return nil, err
}
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
return nil, err
}
requestTime := time.Now()
bytes, err := b.conn.Write(buf)
b.updateOutgoingCommunicationMetrics(bytes)
if err != nil {
return nil, err
}
b.correlationID++
if !promiseResponse {
// Record request latency without the response
b.updateRequestLatencyMetrics(time.Since(requestTime))
return nil, nil
}
promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
b.responses <- promise
return &promise, nil
}
func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
promise, err := b.send(req, res != nil)
if err != nil {
return err
}
if promise == nil {
return nil
}
select {
case buf := <-promise.packets:
return versionedDecode(buf, res, req.version())
case err = <-promise.errors:
return err
}
}
func (b *Broker) decode(pd packetDecoder) (err error) {
b.id, err = pd.getInt32()
if err != nil {
return err
}
host, err := pd.getString()
if err != nil {
return err
}
port, err := pd.getInt32()
if err != nil {
return err
}
b.addr = net.JoinHostPort(host, fmt.Sprint(port))
if _, _, err := net.SplitHostPort(b.addr); err != nil {
return err
}
return nil
}
func (b *Broker) encode(pe packetEncoder) (err error) {
host, portstr, err := net.SplitHostPort(b.addr)
if err != nil {
return err
}
port, err := strconv.Atoi(portstr)
if err != nil {
return err
}
pe.putInt32(b.id)
err = pe.putString(host)
if err != nil {
return err
}
pe.putInt32(int32(port))
return nil
}
func (b *Broker) responseReceiver() {
var dead error
header := make([]byte, 8)
for response := range b.responses {
if dead != nil {
response.errors <- dead
continue
}
err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
if err != nil {
dead = err
response.errors <- err
continue
}
bytesReadHeader, err := io.ReadFull(b.conn, header)
requestLatency := time.Since(response.requestTime)
if err != nil {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
dead = err
response.errors <- err
continue
}
decodedHeader := responseHeader{}
err = decode(header, &decodedHeader)
if err != nil {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
dead = err
response.errors <- err
continue
}
if decodedHeader.correlationID != response.correlationID {
b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
// TODO if decoded ID < cur ID, discard until we catch up
// TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
response.errors <- dead
continue
}
buf := make([]byte, decodedHeader.length-4)
bytesReadBody, err := io.ReadFull(b.conn, buf)
b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
if err != nil {
dead = err
response.errors <- err
continue
}
response.packets <- buf
}
close(b.done)
}
func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
rb := &SaslHandshakeRequest{"PLAIN"}
req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
buf, err := encode(req, b.conf.MetricRegistry)
if err != nil {
return err
}
err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
return err
}
requestTime := time.Now()
bytes, err := b.conn.Write(buf)
b.updateOutgoingCommunicationMetrics(bytes)
if err != nil {
Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
return err
}
b.correlationID++
//wait for the response
header := make([]byte, 8) // response header
_, err = io.ReadFull(b.conn, header)
if err != nil {
Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
return err
}
length := binary.BigEndian.Uint32(header[:4])
payload := make([]byte, length-4)
n, err := io.ReadFull(b.conn, payload)
if err != nil {
Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
return err
}
b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
res := &SaslHandshakeResponse{}
err = versionedDecode(payload, res, 0)
if err != nil {
Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
return err
}
if res.Err != ErrNoError {
Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
return res.Err
}
Logger.Print("Successful SASL handshake")
return nil
}
// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
//
// In SASL Plain, Kafka expects the auth header to be in the following format
// Message format (from https://tools.ietf.org/html/rfc4616):
//
// message = [authzid] UTF8NUL authcid UTF8NUL passwd
// authcid = 1*SAFE ; MUST accept up to 255 octets
// authzid = 1*SAFE ; MUST accept up to 255 octets
// passwd = 1*SAFE ; MUST accept up to 255 octets
// UTF8NUL = %x00 ; UTF-8 encoded NUL character
//
// SAFE = UTF1 / UTF2 / UTF3 / UTF4
// ;; any UTF-8 encoded Unicode character except NUL
//
// When credentials are valid, Kafka returns a 4 byte array of null characters.
// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
// of responding to bad credentials, but that's how it's being done today.
func (b *Broker) sendAndReceiveSASLPlainAuth() error {
if b.conf.Net.SASL.Handshake {
handshakeErr := b.sendAndReceiveSASLPlainHandshake()
if handshakeErr != nil {
Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
return handshakeErr
}
}
length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
authBytes := make([]byte, length+4) //4 byte length header + auth data
binary.BigEndian.PutUint32(authBytes, uint32(length))
copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
if err != nil {
Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
return err
}
requestTime := time.Now()
bytesWritten, err := b.conn.Write(authBytes)
b.updateOutgoingCommunicationMetrics(bytesWritten)
if err != nil {
Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
return err
}
header := make([]byte, 4)
n, err := io.ReadFull(b.conn, header)
b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
// If the credentials are valid, we would get a 4 byte response filled with null characters.
// Otherwise, the broker closes the connection and we get an EOF
if err != nil {
Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
return err
}
Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
return nil
}
func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
b.updateRequestLatencyMetrics(requestLatency)
b.responseRate.Mark(1)
if b.brokerResponseRate != nil {
b.brokerResponseRate.Mark(1)
}
responseSize := int64(bytes)
b.incomingByteRate.Mark(responseSize)
if b.brokerIncomingByteRate != nil {
b.brokerIncomingByteRate.Mark(responseSize)
}
b.responseSize.Update(responseSize)
if b.brokerResponseSize != nil {
b.brokerResponseSize.Update(responseSize)
}
}
func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
requestLatencyInMs := int64(requestLatency / time.Millisecond)
b.requestLatency.Update(requestLatencyInMs)
if b.brokerRequestLatency != nil {
b.brokerRequestLatency.Update(requestLatencyInMs)
}
}
func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
b.requestRate.Mark(1)
if b.brokerRequestRate != nil {
b.brokerRequestRate.Mark(1)
}
requestSize := int64(bytes)
b.outgoingByteRate.Mark(requestSize)
if b.brokerOutgoingByteRate != nil {
b.brokerOutgoingByteRate.Mark(requestSize)
}
b.requestSize.Update(requestSize)
if b.brokerRequestSize != nil {
b.brokerRequestSize.Update(requestSize)
}
}

791
vendor/github.com/Shopify/sarama/client.go generated vendored Normal file

@@ -0,0 +1,791 @@
package sarama
import (
"math/rand"
"sort"
"sync"
"time"
)
// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
// automatically when it passes out of scope. It is safe to share a client amongst many
// users, however Kafka will process requests from a single client strictly in serial,
// so it is generally more efficient to use the default one client per producer/consumer.
type Client interface {
// Config returns the Config struct of the client. This struct should not be
// altered after it has been created.
Config() *Config
// Brokers returns the current set of active brokers as retrieved from cluster metadata.
Brokers() []*Broker
// Topics returns the set of available topics as retrieved from cluster metadata.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
Partitions(topic string) ([]int32, error)
// WritablePartitions returns the sorted list of all writable partition IDs for
// the given topic, where "writable" means "having a valid leader accepting
// writes".
WritablePartitions(topic string) ([]int32, error)
// Leader returns the broker object that is the leader of the current
// topic/partition, as determined by querying the cluster metadata.
Leader(topic string, partitionID int32) (*Broker, error)
// Replicas returns the set of all replica IDs for the given partition.
Replicas(topic string, partitionID int32) ([]int32, error)
// InSyncReplicas returns the set of all in-sync replica IDs for the given
// partition. In-sync replicas are replicas which are fully caught up with
// the partition leader.
InSyncReplicas(topic string, partitionID int32) ([]int32, error)
// RefreshMetadata takes a list of topics and queries the cluster to refresh the
// available metadata for those topics. If no topics are provided, it will refresh
// metadata for all topics.
RefreshMetadata(topics ...string) error
// GetOffset queries the cluster to get the most recent available offset at the
// given time on the topic/partition combination. Time should be OffsetOldest for
// the earliest available offset, OffsetNewest for the offset of the message that
// will be produced next, or a time.
GetOffset(topic string, partitionID int32, time int64) (int64, error)
// Coordinator returns the coordinating broker for a consumer group. It will
// return a locally cached value if it's available. You can call
// RefreshCoordinator to update the cached value. This function only works on
// Kafka 0.8.2 and higher.
Coordinator(consumerGroup string) (*Broker, error)
// RefreshCoordinator retrieves the coordinator for a consumer group and stores it
// in local cache. This function only works on Kafka 0.8.2 and higher.
RefreshCoordinator(consumerGroup string) error
// Close shuts down all broker connections managed by this client. It is required
// to call this function before a client object passes out of scope, as it will
// otherwise leak memory. You must close any Producers or Consumers using a client
// before you close the client.
Close() error
// Closed returns true if the client has already had Close called on it
Closed() bool
}
const (
// OffsetNewest stands for the log head offset, i.e. the offset that will be
// assigned to the next message that will be produced to the partition. You
// can send this to a client's GetOffset method to get this offset, or when
// calling ConsumePartition to start consuming new messages.
OffsetNewest int64 = -1
// OffsetOldest stands for the oldest offset available on the broker for a
// partition. You can send this to a client's GetOffset method to get this
// offset, or when calling ConsumePartition to start consuming from the
// oldest offset that is still available on the broker.
OffsetOldest int64 = -2
)
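// Illustrative sketch, added for this edit; it is not part of the original
// vendored source. It shows how a caller might create a Client and look up
// the newest offset for a partition. The broker address and topic name are
// assumptions, not values taken from this repository.
func exampleClientUsage() {
	client, err := NewClient([]string{"localhost:9092"}, nil) // nil falls back to NewConfig() defaults
	if err != nil {
		panic(err)
	}
	// Clients are not garbage-collected automatically; Close is mandatory.
	defer client.Close()
	// OffsetNewest asks for the offset that the next produced message will get.
	offset, err := client.GetOffset("my-topic", 0, OffsetNewest)
	if err != nil {
		panic(err)
	}
	Logger.Printf("next offset for my-topic/0: %d", offset)
}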
type client struct {
conf *Config
closer, closed chan none // for shutting down background metadata updater
// the broker addresses given to us through the constructor are not guaranteed to be returned in
// the cluster metadata (I *think* it only returns brokers who are currently leading partitions?)
// so we store them separately
seedBrokers []*Broker
deadSeeds []*Broker
brokers map[int32]*Broker // maps broker ids to brokers
metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
// If the number of partitions is large, we can get some churn calling cachedPartitions,
// so the result is cached. It is important to update this value whenever metadata is changed
cachedPartitionsResults map[string][maxPartitionIndex][]int32
lock sync.RWMutex // protects access to the maps that hold cluster state.
}
// NewClient creates a new Client. It connects to one of the given broker addresses
// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
// be retrieved from any of the given broker addresses, the client is not created.
func NewClient(addrs []string, conf *Config) (Client, error) {
Logger.Println("Initializing new client")
if conf == nil {
conf = NewConfig()
}
if err := conf.Validate(); err != nil {
return nil, err
}
if len(addrs) < 1 {
return nil, ConfigurationError("You must provide at least one broker address")
}
client := &client{
conf: conf,
closer: make(chan none),
closed: make(chan none),
brokers: make(map[int32]*Broker),
metadata: make(map[string]map[int32]*PartitionMetadata),
cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
coordinators: make(map[string]int32),
}
random := rand.New(rand.NewSource(time.Now().UnixNano()))
for _, index := range random.Perm(len(addrs)) {
client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
}
if conf.Metadata.Full {
// do an initial fetch of all cluster metadata by specifying an empty list of topics
err := client.RefreshMetadata()
switch err {
case nil:
break
case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
// indicates that maybe part of the cluster is down, but is not fatal to creating the client
Logger.Println(err)
default:
close(client.closed) // we haven't started the background updater yet, so we have to do this manually
_ = client.Close()
return nil, err
}
}
go withRecover(client.backgroundMetadataUpdater)
Logger.Println("Successfully initialized new client")
return client, nil
}
func (client *client) Config() *Config {
return client.conf
}
func (client *client) Brokers() []*Broker {
client.lock.RLock()
defer client.lock.RUnlock()
brokers := make([]*Broker, 0)
for _, broker := range client.brokers {
brokers = append(brokers, broker)
}
return brokers
}
func (client *client) Close() error {
if client.Closed() {
// Chances are this is being called from a defer() and the error will go unobserved
// so we go ahead and log the event in this case.
Logger.Printf("Close() called on already closed client")
return ErrClosedClient
}
// shutdown and wait for the background thread before we take the lock, to avoid races
close(client.closer)
<-client.closed
client.lock.Lock()
defer client.lock.Unlock()
Logger.Println("Closing Client")
for _, broker := range client.brokers {
safeAsyncClose(broker)
}
for _, broker := range client.seedBrokers {
safeAsyncClose(broker)
}
client.brokers = nil
client.metadata = nil
return nil
}
func (client *client) Closed() bool {
return client.brokers == nil
}
func (client *client) Topics() ([]string, error) {
if client.Closed() {
return nil, ErrClosedClient
}
client.lock.RLock()
defer client.lock.RUnlock()
ret := make([]string, 0, len(client.metadata))
for topic := range client.metadata {
ret = append(ret, topic)
}
return ret, nil
}
func (client *client) Partitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, allPartitions)
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, allPartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) WritablePartitions(topic string) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
partitions := client.cachedPartitions(topic, writablePartitions)
// len==0 catches when it's nil (no such topic) and the odd case when every single
// partition is undergoing leader election simultaneously. Callers have to be able to handle
// this function returning an empty slice (which is a valid return value) but catching it
// here the first time (note we *don't* catch it below where we return ErrUnknownTopicOrPartition) triggers
// a metadata refresh as a nicety so callers can just try again and don't have to manually
// trigger a refresh (otherwise they'd just keep getting a stale cached copy).
if len(partitions) == 0 {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
partitions = client.cachedPartitions(topic, writablePartitions)
}
if partitions == nil {
return nil, ErrUnknownTopicOrPartition
}
return partitions, nil
}
func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
return dupInt32Slice(metadata.Replicas), nil
}
func (client *client) InSyncReplicas(topic string, partitionID int32) ([]int32, error) {
if client.Closed() {
return nil, ErrClosedClient
}
metadata := client.cachedMetadata(topic, partitionID)
if metadata == nil {
err := client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
metadata = client.cachedMetadata(topic, partitionID)
}
if metadata == nil {
return nil, ErrUnknownTopicOrPartition
}
if metadata.Err == ErrReplicaNotAvailable {
return nil, metadata.Err
}
return dupInt32Slice(metadata.Isr), nil
}
func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
leader, err := client.cachedLeader(topic, partitionID)
if leader == nil {
err = client.RefreshMetadata(topic)
if err != nil {
return nil, err
}
leader, err = client.cachedLeader(topic, partitionID)
}
return leader, err
}
func (client *client) RefreshMetadata(topics ...string) error {
if client.Closed() {
return ErrClosedClient
}
// Prior to 0.8.2, Kafka will throw exceptions on an empty topic and not return a proper
// error. This handles the case by returning an error instead of sending it
// off to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
for _, topic := range topics {
if len(topic) == 0 {
return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
}
}
return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
}
func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
if client.Closed() {
return -1, ErrClosedClient
}
offset, err := client.getOffset(topic, partitionID, time)
if err != nil {
if err := client.RefreshMetadata(topic); err != nil {
return -1, err
}
return client.getOffset(topic, partitionID, time)
}
return offset, err
}
func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
if client.Closed() {
return nil, ErrClosedClient
}
coordinator := client.cachedCoordinator(consumerGroup)
if coordinator == nil {
if err := client.RefreshCoordinator(consumerGroup); err != nil {
return nil, err
}
coordinator = client.cachedCoordinator(consumerGroup)
}
if coordinator == nil {
return nil, ErrConsumerCoordinatorNotAvailable
}
_ = coordinator.Open(client.conf)
return coordinator, nil
}
func (client *client) RefreshCoordinator(consumerGroup string) error {
if client.Closed() {
return ErrClosedClient
}
response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
if err != nil {
return err
}
client.lock.Lock()
defer client.lock.Unlock()
client.registerBroker(response.Coordinator)
client.coordinators[consumerGroup] = response.Coordinator.ID()
return nil
}
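// Illustrative sketch, added for this edit; it is not part of the original
// vendored source. The consumer group name is an assumption. Coordinator
// consults the local cache and falls back to a cluster lookup; call
// RefreshCoordinator first when the cached value may be stale.
func exampleCoordinatorLookup(client Client) {
	coordinator, err := client.Coordinator("my-group")
	if err != nil {
		panic(err)
	}
	Logger.Printf("coordinator for my-group is broker #%d at %s", coordinator.ID(), coordinator.Addr())
}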
// private broker management helpers
// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
// in the brokers map. If a broker with the same ID is already registered under a different address,
// the old one is closed and replaced. You must hold the write lock before calling this function.
func (client *client) registerBroker(broker *Broker) {
if client.brokers[broker.ID()] == nil {
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
} else if broker.Addr() != client.brokers[broker.ID()].Addr() {
safeAsyncClose(client.brokers[broker.ID()])
client.brokers[broker.ID()] = broker
Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
}
}
// deregisterBroker removes a broker from the seedBrokers list, and if it's
// not a seed broker, removes it from the brokers map completely.
func (client *client) deregisterBroker(broker *Broker) {
client.lock.Lock()
defer client.lock.Unlock()
if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
client.deadSeeds = append(client.deadSeeds, broker)
client.seedBrokers = client.seedBrokers[1:]
} else {
// we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
// but we really shouldn't have to; once that loop is made better this case can be
// removed, and the function generally can be renamed from `deregisterBroker` to
// `nextSeedBroker` or something
Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
delete(client.brokers, broker.ID())
}
}
func (client *client) resurrectDeadBrokers() {
client.lock.Lock()
defer client.lock.Unlock()
Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
client.deadSeeds = nil
}
func (client *client) any() *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if len(client.seedBrokers) > 0 {
_ = client.seedBrokers[0].Open(client.conf)
return client.seedBrokers[0]
}
// not guaranteed to be random *or* deterministic
for _, broker := range client.brokers {
_ = broker.Open(client.conf)
return broker
}
return nil
}
// private caching/lazy metadata helpers
type partitionType int
const (
allPartitions partitionType = iota
writablePartitions
// If you add any more types, update the partition cache in updateMetadata()
// Ensure this is the last partition type value
maxPartitionIndex
)
func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
return partitions[partitionID]
}
return nil
}
func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
client.lock.RLock()
defer client.lock.RUnlock()
partitions, exists := client.cachedPartitionsResults[topic]
if !exists {
return nil
}
return partitions[partitionSet]
}
func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
partitions := client.metadata[topic]
if partitions == nil {
return nil
}
ret := make([]int32, 0, len(partitions))
for _, partition := range partitions {
if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
continue
}
ret = append(ret, partition.ID)
}
sort.Sort(int32Slice(ret))
return ret
}
func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
client.lock.RLock()
defer client.lock.RUnlock()
partitions := client.metadata[topic]
if partitions != nil {
metadata, ok := partitions[partitionID]
if ok {
if metadata.Err == ErrLeaderNotAvailable {
return nil, ErrLeaderNotAvailable
}
b := client.brokers[metadata.Leader]
if b == nil {
return nil, ErrLeaderNotAvailable
}
_ = b.Open(client.conf)
return b, nil
}
}
return nil, ErrUnknownTopicOrPartition
}
func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
broker, err := client.Leader(topic, partitionID)
if err != nil {
return -1, err
}
request := &OffsetRequest{}
if client.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 1
}
request.AddBlock(topic, partitionID, time, 1)
response, err := broker.GetAvailableOffsets(request)
if err != nil {
_ = broker.Close()
return -1, err
}
block := response.GetBlock(topic, partitionID)
if block == nil {
_ = broker.Close()
return -1, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return -1, block.Err
}
if len(block.Offsets) != 1 {
return -1, ErrOffsetOutOfRange
}
return block.Offsets[0], nil
}
// core metadata update logic
func (client *client) backgroundMetadataUpdater() {
defer close(client.closed)
if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
return
}
ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
defer ticker.Stop()
for {
select {
case <-ticker.C:
topics := []string{}
if !client.conf.Metadata.Full {
if specificTopics, err := client.Topics(); err != nil {
Logger.Println("Client background metadata topic load:", err)
break
} else {
topics = specificTopics
}
}
if err := client.RefreshMetadata(topics...); err != nil {
Logger.Println("Client background metadata update:", err)
}
case <-client.closer:
return
}
}
}
func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
retry := func(err error) error {
if attemptsRemaining > 0 {
Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.tryRefreshMetadata(topics, attemptsRemaining-1)
}
return err
}
for broker := client.any(); broker != nil; broker = client.any() {
if len(topics) > 0 {
Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
} else {
Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
}
response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
switch err.(type) {
case nil:
// valid response, use it
shouldRetry, err := client.updateMetadata(response)
if shouldRetry {
Logger.Println("client/metadata found some partitions to be leaderless")
return retry(err) // note: err can be nil
}
return err
case PacketEncodingError:
// didn't even send, return the error
return err
default:
// some other error, remove that broker and try again
Logger.Println("client/metadata got error from broker while fetching metadata:", err)
_ = broker.Close()
client.deregisterBroker(broker)
}
}
Logger.Println("client/metadata no available broker to send metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}
// if there is no fatal error, the returned retry flag reports whether some topics or partitions
// were leaderless (ErrLeaderNotAvailable) and the refresh should be retried
func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
client.lock.Lock()
defer client.lock.Unlock()
// For all the brokers we received:
// - if it is a new ID, save it
// - if it is an existing ID, but the address we have is stale, discard the old one and save it
// - otherwise ignore it, replacing our existing one would just bounce the connection
for _, broker := range data.Brokers {
client.registerBroker(broker)
}
for _, topic := range data.Topics {
delete(client.metadata, topic.Name)
delete(client.cachedPartitionsResults, topic.Name)
switch topic.Err {
case ErrNoError:
break
case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
err = topic.Err
continue
case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
err = topic.Err
retry = true
continue
case ErrLeaderNotAvailable: // retry, but store partial partition results
retry = true
break
default: // don't retry, don't store partial results
Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
err = topic.Err
continue
}
client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
for _, partition := range topic.Partitions {
client.metadata[topic.Name][partition.ID] = partition
if partition.Err == ErrLeaderNotAvailable {
retry = true
}
}
var partitionCache [maxPartitionIndex][]int32
partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
client.cachedPartitionsResults[topic.Name] = partitionCache
}
return
}
func (client *client) cachedCoordinator(consumerGroup string) *Broker {
client.lock.RLock()
defer client.lock.RUnlock()
if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
return client.brokers[coordinatorID]
}
return nil
}
func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
retry := func(err error) (*ConsumerMetadataResponse, error) {
if attemptsRemaining > 0 {
Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
time.Sleep(client.conf.Metadata.Retry.Backoff)
return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
}
return nil, err
}
for broker := client.any(); broker != nil; broker = client.any() {
Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
request := new(ConsumerMetadataRequest)
request.ConsumerGroup = consumerGroup
response, err := broker.GetConsumerMetadata(request)
if err != nil {
Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
switch err.(type) {
case PacketEncodingError:
return nil, err
default:
_ = broker.Close()
client.deregisterBroker(broker)
continue
}
}
switch response.Err {
case ErrNoError:
Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
return response, nil
case ErrConsumerCoordinatorNotAvailable:
Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
// This is very ugly, but this scenario will only happen once per cluster.
// The __consumer_offsets topic only has to be created one time.
// The number of partitions is not configurable, but partition 0 should always exist.
if _, err := client.Leader("__consumer_offsets", 0); err != nil {
Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
time.Sleep(2 * time.Second)
}
return retry(ErrConsumerCoordinatorNotAvailable)
default:
return nil, response.Err
}
}
Logger.Println("client/coordinator no available broker to send consumer metadata request to")
client.resurrectDeadBrokers()
return retry(ErrOutOfBrokers)
}

430
vendor/github.com/Shopify/sarama/config.go generated vendored Normal file
View File

@ -0,0 +1,430 @@
package sarama
import (
"crypto/tls"
"regexp"
"time"
"github.com/rcrowley/go-metrics"
)
const defaultClientID = "sarama"
var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
// Config is used to pass multiple configuration options to Sarama's constructors.
type Config struct {
// Net is the namespace for network-level properties used by the Broker, and
// shared by the Client/Producer/Consumer.
Net struct {
// How many outstanding requests a connection is allowed to have before
// sending on it blocks (default 5).
MaxOpenRequests int
// All three of the below configurations are similar to the
// `socket.timeout.ms` setting in JVM kafka. All of them default
// to 30 seconds.
DialTimeout time.Duration // How long to wait for the initial connection.
ReadTimeout time.Duration // How long to wait for a response.
WriteTimeout time.Duration // How long to wait for a transmit.
TLS struct {
// Whether or not to use TLS when connecting to the broker
// (defaults to false).
Enable bool
// The TLS configuration to use for secure connections if
// enabled (defaults to nil).
Config *tls.Config
}
// SASL-based authentication with the broker. While there are multiple SASL authentication methods,
// the current implementation is limited to plaintext (SASL/PLAIN) authentication
SASL struct {
// Whether or not to use SASL authentication when connecting to the broker
// (defaults to false).
Enable bool
// Whether or not to send the Kafka SASL handshake first if enabled
// (defaults to true). You should only set this to false if you're using
// a non-Kafka SASL proxy.
Handshake bool
// Username and password for SASL/PLAIN authentication
User string
Password string
}
// KeepAlive specifies the keep-alive period for an active network connection.
// If zero, keep-alives are disabled. (default is 0: disabled).
KeepAlive time.Duration
}
// Metadata is the namespace for metadata management properties used by the
// Client, and shared by the Producer/Consumer.
Metadata struct {
Retry struct {
// The total number of times to retry a metadata request when the
// cluster is in the middle of a leader election (default 3).
Max int
// How long to wait for leader election to occur before retrying
// (default 250ms). Similar to the JVM's `retry.backoff.ms`.
Backoff time.Duration
}
// How frequently to refresh the cluster metadata in the background.
// Defaults to 10 minutes. Set to 0 to disable. Similar to
// `topic.metadata.refresh.interval.ms` in the JVM version.
RefreshFrequency time.Duration
// Whether to maintain a full set of metadata for all topics, or just
// the minimal set that has been necessary so far. The full set is simpler
// and usually more convenient, but can take up a substantial amount of
// memory if you have many topics and partitions. Defaults to true.
Full bool
}
// Producer is the namespace for configuration related to producing messages,
// used by the Producer.
Producer struct {
// The maximum permitted size of a message (defaults to 1000000). Should be
// set equal to or smaller than the broker's `message.max.bytes`.
MaxMessageBytes int
// The level of acknowledgement reliability needed from the broker (defaults
// to WaitForLocal). Equivalent to the `request.required.acks` setting of the
// JVM producer.
RequiredAcks RequiredAcks
// The maximum duration the broker will wait for the receipt of the number of
// RequiredAcks (defaults to 10 seconds). This is only relevant when
// RequiredAcks is set to WaitForAll or a number > 1. Only supports
// millisecond resolution, nanoseconds will be truncated. Equivalent to
// the JVM producer's `request.timeout.ms` setting.
Timeout time.Duration
// The type of compression to use on messages (defaults to no compression).
// Similar to `compression.codec` setting of the JVM producer.
Compression CompressionCodec
// Generates partitioners for choosing the partition to send messages to
// (defaults to hashing the message key). Similar to the `partitioner.class`
// setting for the JVM producer.
Partitioner PartitionerConstructor
// Return specifies what channels will be populated. If they are set to true,
// you must read from the respective channels to prevent deadlock. If,
// however, this config is used to create a `SyncProducer`, both must be set
// to true and you shall not read from the channels since the producer does
// this internally.
Return struct {
// If enabled, successfully delivered messages will be returned on the
// Successes channel (default disabled).
Successes bool
// If enabled, messages that failed to deliver will be returned on the
// Errors channel, including the associated error (default enabled).
Errors bool
}
// The following config options control how often messages are batched up and
// sent to the broker. By default, messages are sent as fast as possible, and
// all messages received while the current batch is in-flight are placed
// into the subsequent batch.
Flush struct {
// The best-effort number of bytes needed to trigger a flush. Use the
// global sarama.MaxRequestSize to set a hard upper limit.
Bytes int
// The best-effort number of messages needed to trigger a flush. Use
// `MaxMessages` to set a hard upper limit.
Messages int
// The best-effort frequency of flushes. Equivalent to
// `queue.buffering.max.ms` setting of JVM producer.
Frequency time.Duration
// The maximum number of messages the producer will send in a single
// broker request. Defaults to 0 for unlimited. Similar to
// `queue.buffering.max.messages` in the JVM producer.
MaxMessages int
}
Retry struct {
// The total number of times to retry sending a message (default 3).
// Similar to the `message.send.max.retries` setting of the JVM producer.
Max int
// How long to wait for the cluster to settle between retries
// (default 100ms). Similar to the `retry.backoff.ms` setting of the
// JVM producer.
Backoff time.Duration
}
}
// Consumer is the namespace for configuration related to consuming messages,
// used by the Consumer.
//
// Note that Sarama's Consumer type does not currently support automatic
// consumer-group rebalancing and offset tracking. For Zookeeper-based
// tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
// library builds on Sarama to add this support. For Kafka-based tracking
// (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
// builds on Sarama to add this support.
Consumer struct {
Retry struct {
// How long to wait after failing to read from a partition before
// trying again (default 2s).
Backoff time.Duration
}
// Fetch is the namespace for controlling how many bytes are retrieved by any
// given request.
Fetch struct {
// The minimum number of message bytes to fetch in a request - the broker
// will wait until at least this many are available. The default is 1,
// as 0 causes the consumer to spin when no messages are available.
// Equivalent to the JVM's `fetch.min.bytes`.
Min int32
// The default number of message bytes to fetch from the broker in each
// request (default 32768). This should be larger than the majority of
// your messages, or else the consumer will spend a lot of time
// negotiating sizes and not actually consuming. Similar to the JVM's
// `fetch.message.max.bytes`.
Default int32
// The maximum number of message bytes to fetch from the broker in a
// single request. Messages larger than this will return
// ErrMessageTooLarge and will not be consumable, so you must be sure
// this is at least as large as your largest message. Defaults to 0
// (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
// global `sarama.MaxResponseSize` still applies.
Max int32
}
// The maximum amount of time the broker will wait for Consumer.Fetch.Min
// bytes to become available before it returns fewer than that anyway. The
// default is 250ms, since 0 causes the consumer to spin when no events are
// available. 100-500ms is a reasonable range for most cases. Kafka only
// supports precision up to milliseconds; nanoseconds will be truncated.
// Equivalent to the JVM's `fetch.wait.max.ms`.
MaxWaitTime time.Duration
// The maximum amount of time the consumer expects a message takes to process
// for the user. If writing to the Messages channel takes longer than this,
// that partition will stop fetching more messages until it can proceed again.
// Note that, since the Messages channel is buffered, the actual grace time is
// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
MaxProcessingTime time.Duration
// Return specifies what channels will be populated. If they are set to true,
// you must read from them to prevent deadlock.
Return struct {
// If enabled, any errors that occurred while consuming are returned on
// the Errors channel (default disabled).
Errors bool
}
// Offsets specifies configuration for how and when to commit consumed
// offsets. This currently requires the manual use of an OffsetManager
// but will eventually be automated.
Offsets struct {
// How frequently to commit updated offsets. Defaults to 1s.
CommitInterval time.Duration
// The initial offset to use if no offset was previously committed.
// Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
Initial int64
// The retention duration for committed offsets. If zero, disabled
// (in which case the `offsets.retention.minutes` option on the
// broker will be used). Kafka only supports precision up to
// milliseconds; nanoseconds will be truncated. Requires Kafka
// broker version 0.9.0 or later.
// (default is 0: disabled).
Retention time.Duration
}
}
// A user-provided string sent with every request to the brokers for logging,
// debugging, and auditing purposes. Defaults to "sarama", but you should
// probably set it to something specific to your application.
ClientID string
// The number of events to buffer in internal and external channels. This
// permits the producer and consumer to continue processing some messages
// in the background while user code is working, greatly improving throughput.
// Defaults to 256.
ChannelBufferSize int
// The version of Kafka that Sarama will assume it is running against.
// Defaults to the oldest supported stable version. Since Kafka provides
// backwards-compatibility, setting it to a version older than you have
// will not break anything, although it may prevent you from using the
// latest features. Setting it to a version greater than you are actually
// running may lead to random breakage.
Version KafkaVersion
// The registry to define metrics into.
// Defaults to a local registry.
// If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
// prior to starting Sarama.
// See Examples on how to use the metrics registry
MetricRegistry metrics.Registry
}
// NewConfig returns a new configuration instance with sane defaults.
func NewConfig() *Config {
c := &Config{}
c.Net.MaxOpenRequests = 5
c.Net.DialTimeout = 30 * time.Second
c.Net.ReadTimeout = 30 * time.Second
c.Net.WriteTimeout = 30 * time.Second
c.Net.SASL.Handshake = true
c.Metadata.Retry.Max = 3
c.Metadata.Retry.Backoff = 250 * time.Millisecond
c.Metadata.RefreshFrequency = 10 * time.Minute
c.Metadata.Full = true
c.Producer.MaxMessageBytes = 1000000
c.Producer.RequiredAcks = WaitForLocal
c.Producer.Timeout = 10 * time.Second
c.Producer.Partitioner = NewHashPartitioner
c.Producer.Retry.Max = 3
c.Producer.Retry.Backoff = 100 * time.Millisecond
c.Producer.Return.Errors = true
c.Consumer.Fetch.Min = 1
c.Consumer.Fetch.Default = 32768
c.Consumer.Retry.Backoff = 2 * time.Second
c.Consumer.MaxWaitTime = 250 * time.Millisecond
c.Consumer.MaxProcessingTime = 100 * time.Millisecond
c.Consumer.Return.Errors = false
c.Consumer.Offsets.CommitInterval = 1 * time.Second
c.Consumer.Offsets.Initial = OffsetNewest
c.ClientID = defaultClientID
c.ChannelBufferSize = 256
c.Version = minVersion
c.MetricRegistry = metrics.NewRegistry()
return c
}
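// Illustrative sketch, added for this edit; it is not part of the original
// vendored source. Callers typically start from NewConfig and override only
// what they need before handing the struct to a constructor. The specific
// overrides below are examples, not recommendations from this repository.
func exampleConfigUsage() {
	config := NewConfig()
	config.ClientID = "my-application"   // something more specific than the default "sarama"
	config.Version = V0_10_1_0           // opt in to protocol features up to Kafka 0.10.1
	config.Consumer.Return.Errors = true // deliver consume errors on the Errors channel
	// Validate is also called by the constructors, but checking early gives
	// a clearer failure point.
	if err := config.Validate(); err != nil {
		panic(err)
	}
}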
// Validate checks a Config instance. It will return a
// ConfigurationError if the specified values don't make sense.
func (c *Config) Validate() error {
// some configuration values should be warned on but not fail completely, do those first
if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
}
if !c.Net.SASL.Enable {
if c.Net.SASL.User != "" {
Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
}
if c.Net.SASL.Password != "" {
Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
}
}
if c.Producer.RequiredAcks > 1 {
Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
}
if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
Logger.Println("Producer.MaxMessageBytes must be smaller than MaxRequestSize; it will be ignored.")
}
if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
Logger.Println("Producer.Flush.Bytes must be smaller than MaxRequestSize; it will be ignored.")
}
if (c.Producer.Flush.Bytes > 0 || c.Producer.Flush.Messages > 0) && c.Producer.Flush.Frequency == 0 {
Logger.Println("Producer.Flush: Bytes or Messages are set, but Frequency is not; messages may not get flushed.")
}
if c.Producer.Timeout%time.Millisecond != 0 {
Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
}
if c.Consumer.MaxWaitTime < 100*time.Millisecond {
Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
}
if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
}
if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
}
if c.ClientID == defaultClientID {
Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
}
// validate Net values
switch {
case c.Net.MaxOpenRequests <= 0:
return ConfigurationError("Net.MaxOpenRequests must be > 0")
case c.Net.DialTimeout <= 0:
return ConfigurationError("Net.DialTimeout must be > 0")
case c.Net.ReadTimeout <= 0:
return ConfigurationError("Net.ReadTimeout must be > 0")
case c.Net.WriteTimeout <= 0:
return ConfigurationError("Net.WriteTimeout must be > 0")
case c.Net.KeepAlive < 0:
return ConfigurationError("Net.KeepAlive must be >= 0")
case c.Net.SASL.Enable && c.Net.SASL.User == "":
return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
case c.Net.SASL.Enable && c.Net.SASL.Password == "":
return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
}
// validate the Metadata values
switch {
case c.Metadata.Retry.Max < 0:
return ConfigurationError("Metadata.Retry.Max must be >= 0")
case c.Metadata.Retry.Backoff < 0:
return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
case c.Metadata.RefreshFrequency < 0:
return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
}
// validate the Producer values
switch {
case c.Producer.MaxMessageBytes <= 0:
return ConfigurationError("Producer.MaxMessageBytes must be > 0")
case c.Producer.RequiredAcks < -1:
return ConfigurationError("Producer.RequiredAcks must be >= -1")
case c.Producer.Timeout <= 0:
return ConfigurationError("Producer.Timeout must be > 0")
case c.Producer.Partitioner == nil:
return ConfigurationError("Producer.Partitioner must not be nil")
case c.Producer.Flush.Bytes < 0:
return ConfigurationError("Producer.Flush.Bytes must be >= 0")
case c.Producer.Flush.Messages < 0:
return ConfigurationError("Producer.Flush.Messages must be >= 0")
case c.Producer.Flush.Frequency < 0:
return ConfigurationError("Producer.Flush.Frequency must be >= 0")
case c.Producer.Flush.MaxMessages < 0:
return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
case c.Producer.Retry.Max < 0:
return ConfigurationError("Producer.Retry.Max must be >= 0")
case c.Producer.Retry.Backoff < 0:
return ConfigurationError("Producer.Retry.Backoff must be >= 0")
}
if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
}
// validate the Consumer values
switch {
case c.Consumer.Fetch.Min <= 0:
return ConfigurationError("Consumer.Fetch.Min must be > 0")
case c.Consumer.Fetch.Default <= 0:
return ConfigurationError("Consumer.Fetch.Default must be > 0")
case c.Consumer.Fetch.Max < 0:
return ConfigurationError("Consumer.Fetch.Max must be >= 0")
case c.Consumer.MaxWaitTime < 1*time.Millisecond:
return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
case c.Consumer.MaxProcessingTime <= 0:
return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
case c.Consumer.Retry.Backoff < 0:
return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
case c.Consumer.Offsets.CommitInterval <= 0:
return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
}
// validate misc shared values
switch {
case c.ChannelBufferSize < 0:
return ConfigurationError("ChannelBufferSize must be >= 0")
case !validID.MatchString(c.ClientID):
return ConfigurationError("ClientID is invalid")
}
return nil
}
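// Illustrative sketch, added for this edit; it is not part of the original
// vendored source. Validate rejects out-of-range values with a
// ConfigurationError rather than silently clamping them.
func exampleValidateFailure() {
	config := NewConfig()
	config.Metadata.Retry.Max = -1 // invalid: must be >= 0
	if err := config.Validate(); err != nil {
		Logger.Println("invalid config:", err) // "Metadata.Retry.Max must be >= 0"
	}
}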

747
vendor/github.com/Shopify/sarama/consumer.go generated vendored Normal file
View File

@ -0,0 +1,747 @@
package sarama
import (
"errors"
"fmt"
"sync"
"sync/atomic"
"time"
)
// ConsumerMessage encapsulates a Kafka message returned by the consumer.
type ConsumerMessage struct {
Key, Value []byte
Topic string
Partition int32
Offset int64
Timestamp time.Time // only set if kafka is version 0.10+, inner message timestamp
BlockTimestamp time.Time // only set if kafka is version 0.10+, outer (compressed) block timestamp
}
// ConsumerError is what is provided to the user when an error occurs.
// It wraps an error and includes the topic and partition.
type ConsumerError struct {
Topic string
Partition int32
Err error
}
func (ce ConsumerError) Error() string {
return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
}
// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
// It can be returned from the PartitionConsumer's Close method to avoid the need to manually drain errors
// when stopping.
type ConsumerErrors []*ConsumerError
func (ce ConsumerErrors) Error() string {
return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
}
// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
// on a consumer to avoid leaks, it will not be garbage-collected automatically when it passes out of
// scope.
//
// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
type Consumer interface {
// Topics returns the set of available topics as retrieved from the cluster
// metadata. This method is the same as Client.Topics(), and is provided for
// convenience.
Topics() ([]string, error)
// Partitions returns the sorted list of all partition IDs for the given topic.
// This method is the same as Client.Partitions(), and is provided for convenience.
Partitions(topic string) ([]int32, error)
// ConsumePartition creates a PartitionConsumer on the given topic/partition with
// the given offset. It will return an error if this Consumer is already consuming
// on the given topic/partition. Offset can be a literal offset, or OffsetNewest
// or OffsetOldest
ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
// HighWaterMarks returns the current high water marks for each topic and partition.
// Consistency between partitions is not guaranteed since high water marks are updated separately.
HighWaterMarks() map[string]map[int32]int64
// Close shuts down the consumer. It must be called after all child
// PartitionConsumers have already been closed.
Close() error
}
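// Illustrative sketch, added for this edit; it is not part of the original
// vendored source. It uses the Consumer's convenience methods to discover
// topics and partitions before choosing what to consume.
func exampleTopicDiscovery(consumer Consumer) {
	topics, err := consumer.Topics()
	if err != nil {
		panic(err)
	}
	for _, topic := range topics {
		partitions, err := consumer.Partitions(topic)
		if err != nil {
			panic(err)
		}
		Logger.Printf("topic %s has partitions %v", topic, partitions)
	}
}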
type consumer struct {
client Client
conf *Config
ownClient bool
lock sync.Mutex
children map[string]map[int32]*partitionConsumer
brokerConsumers map[*Broker]*brokerConsumer
}
// NewConsumer creates a new consumer using the given broker addresses and configuration.
func NewConsumer(addrs []string, config *Config) (Consumer, error) {
client, err := NewClient(addrs, config)
if err != nil {
return nil, err
}
c, err := NewConsumerFromClient(client)
if err != nil {
return nil, err
}
c.(*consumer).ownClient = true
return c, nil
}
// NewConsumerFromClient creates a new consumer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this consumer.
func NewConsumerFromClient(client Client) (Consumer, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
c := &consumer{
client: client,
conf: client.Config(),
children: make(map[string]map[int32]*partitionConsumer),
brokerConsumers: make(map[*Broker]*brokerConsumer),
}
return c, nil
}
func (c *consumer) Close() error {
if c.ownClient {
return c.client.Close()
}
return nil
}
func (c *consumer) Topics() ([]string, error) {
return c.client.Topics()
}
func (c *consumer) Partitions(topic string) ([]int32, error) {
return c.client.Partitions(topic)
}
func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
child := &partitionConsumer{
consumer: c,
conf: c.conf,
topic: topic,
partition: partition,
messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
feeder: make(chan *FetchResponse, 1),
trigger: make(chan none, 1),
dying: make(chan none),
fetchSize: c.conf.Consumer.Fetch.Default,
}
if err := child.chooseStartingOffset(offset); err != nil {
return nil, err
}
var leader *Broker
var err error
if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
return nil, err
}
if err := c.addChild(child); err != nil {
return nil, err
}
go withRecover(child.dispatcher)
go withRecover(child.responseFeeder)
child.broker = c.refBrokerConsumer(leader)
child.broker.input <- child
return child, nil
}
func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
c.lock.Lock()
defer c.lock.Unlock()
hwms := make(map[string]map[int32]int64)
for topic, p := range c.children {
hwm := make(map[int32]int64, len(p))
for partition, pc := range p {
hwm[partition] = pc.HighWaterMarkOffset()
}
hwms[topic] = hwm
}
return hwms
}
func (c *consumer) addChild(child *partitionConsumer) error {
c.lock.Lock()
defer c.lock.Unlock()
topicChildren := c.children[child.topic]
if topicChildren == nil {
topicChildren = make(map[int32]*partitionConsumer)
c.children[child.topic] = topicChildren
}
if topicChildren[child.partition] != nil {
return ConfigurationError("That topic/partition is already being consumed")
}
topicChildren[child.partition] = child
return nil
}
func (c *consumer) removeChild(child *partitionConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.children[child.topic], child.partition)
}
func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
c.lock.Lock()
defer c.lock.Unlock()
bc := c.brokerConsumers[broker]
if bc == nil {
bc = c.newBrokerConsumer(broker)
c.brokerConsumers[broker] = bc
}
bc.refs++
return bc
}
func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
brokerWorker.refs--
if brokerWorker.refs == 0 {
close(brokerWorker.input)
if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
delete(c.brokerConsumers, brokerWorker.broker)
}
}
}
func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
c.lock.Lock()
defer c.lock.Unlock()
delete(c.brokerConsumers, brokerWorker.broker)
}
// PartitionConsumer
// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call one of Close() or
// AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically when it passes out
// of scope.
//
// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
//
// To terminate such a for/range loop while the loop is executing, call AsyncClose. This will kick off the process of
// consumer tear-down & return immediately. Continue to loop, servicing the Messages channel until the teardown process
// AsyncClose initiated closes it (thus terminating the for/range loop). If you've already ceased reading Messages, call
// Close; this will signal the PartitionConsumer's goroutines to begin shutting down (just like AsyncClose), but will
// also drain the Messages channel, harvest all errors & return them once cleanup has completed.
type PartitionConsumer interface {
// AsyncClose initiates a shutdown of the PartitionConsumer. This method will return immediately, after which you
// should continue to service the 'Messages' and 'Errors' channels until they are empty. It is required to call this
// function (or Close) before a consumer object passes out of scope, as it will otherwise leak memory. You must call
// this before calling Close on the underlying client.
AsyncClose()
// Close stops the PartitionConsumer from fetching messages. It will initiate a shutdown just like AsyncClose, drain
// the Messages channel, harvest any errors & return them to the caller. Note that if you are continuing to service
// the Messages channel when this function is called, you will be competing with Close for messages; consider
// calling AsyncClose, instead. It is required to call this function (or AsyncClose) before a consumer object passes
// out of scope, as it will otherwise leak memory. You must call this before calling Close on the underlying client.
Close() error
// Messages returns the read channel for the messages that are returned by
// the broker.
Messages() <-chan *ConsumerMessage
// Errors returns a read channel of errors that occurred during consuming, if
// enabled. By default, errors are logged and not returned over this channel.
// If you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan *ConsumerError
// HighWaterMarkOffset returns the high water mark offset of the partition,
// i.e. the offset that will be used for the next message that will be produced.
// You can use this to determine how far behind the processing is.
HighWaterMarkOffset() int64
}
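// Illustrative sketch, added for this edit; it is not part of the original
// vendored source. It consumes a fixed number of messages from one
// partition and then shuts down; the broker address, topic, and partition
// are assumptions. Close drains the Messages channel and harvests any
// outstanding errors, so no manual draining is needed here.
func examplePartitionConsumerLoop() {
	consumer, err := NewConsumer([]string{"localhost:9092"}, nil)
	if err != nil {
		panic(err)
	}
	pc, err := consumer.ConsumePartition("my-topic", 0, OffsetNewest)
	if err != nil {
		panic(err)
	}
	for i := 0; i < 10; i++ {
		msg := <-pc.Messages()
		lag := pc.HighWaterMarkOffset() - msg.Offset // rough estimate of how far behind we are
		Logger.Printf("%s/%d offset %d (lag %d)", msg.Topic, msg.Partition, msg.Offset, lag)
	}
	if err := pc.Close(); err != nil {
		Logger.Println("errors while consuming:", err)
	}
	if err := consumer.Close(); err != nil {
		panic(err)
	}
}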
type partitionConsumer struct {
highWaterMarkOffset int64 // must be at the top of the struct because https://golang.org/pkg/sync/atomic/#pkg-note-BUG
consumer *consumer
conf *Config
topic string
partition int32
broker *brokerConsumer
messages chan *ConsumerMessage
errors chan *ConsumerError
feeder chan *FetchResponse
trigger, dying chan none
responseResult error
fetchSize int32
offset int64
}
var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
func (child *partitionConsumer) sendError(err error) {
cErr := &ConsumerError{
Topic: child.topic,
Partition: child.partition,
Err: err,
}
if child.conf.Consumer.Return.Errors {
child.errors <- cErr
} else {
Logger.Println(cErr)
}
}
func (child *partitionConsumer) dispatcher() {
for range child.trigger {
select {
case <-child.dying:
close(child.trigger)
case <-time.After(child.conf.Consumer.Retry.Backoff):
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
child.broker = nil
}
Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
if err := child.dispatch(); err != nil {
child.sendError(err)
child.trigger <- none{}
}
}
}
if child.broker != nil {
child.consumer.unrefBrokerConsumer(child.broker)
}
child.consumer.removeChild(child)
close(child.feeder)
}
func (child *partitionConsumer) dispatch() error {
if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
return err
}
var leader *Broker
var err error
if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
return err
}
child.broker = child.consumer.refBrokerConsumer(leader)
child.broker.input <- child
return nil
}
func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
if err != nil {
return err
}
oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
if err != nil {
return err
}
switch {
case offset == OffsetNewest:
child.offset = newestOffset
case offset == OffsetOldest:
child.offset = oldestOffset
case offset >= oldestOffset && offset <= newestOffset:
child.offset = offset
default:
return ErrOffsetOutOfRange
}
return nil
}
func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
return child.messages
}
func (child *partitionConsumer) Errors() <-chan *ConsumerError {
return child.errors
}
func (child *partitionConsumer) AsyncClose() {
// this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
// the dispatcher to exit its loop, which removes it from the consumer then closes its 'messages' and
// 'errors' channel (alternatively, if the child is already at the dispatcher for some reason, that will
// also just close itself)
close(child.dying)
}
func (child *partitionConsumer) Close() error {
child.AsyncClose()
go withRecover(func() {
for range child.messages {
// drain
}
})
var errors ConsumerErrors
for err := range child.errors {
errors = append(errors, err)
}
if len(errors) > 0 {
return errors
}
return nil
}
func (child *partitionConsumer) HighWaterMarkOffset() int64 {
return atomic.LoadInt64(&child.highWaterMarkOffset)
}
func (child *partitionConsumer) responseFeeder() {
var msgs []*ConsumerMessage
expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
expireTimedOut := false
feederLoop:
for response := range child.feeder {
msgs, child.responseResult = child.parseResponse(response)
for i, msg := range msgs {
if !expiryTimer.Stop() && !expireTimedOut {
// expiryTimer was expired; clear out the waiting msg
<-expiryTimer.C
}
expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
expireTimedOut = false
select {
case child.messages <- msg:
case <-expiryTimer.C:
expireTimedOut = true
child.responseResult = errTimedOut
child.broker.acks.Done()
for _, msg = range msgs[i:] {
child.messages <- msg
}
child.broker.input <- child
continue feederLoop
}
}
child.broker.acks.Done()
}
close(child.messages)
close(child.errors)
}
func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
block := response.GetBlock(child.topic, child.partition)
if block == nil {
return nil, ErrIncompleteResponse
}
if block.Err != ErrNoError {
return nil, block.Err
}
if len(block.MsgSet.Messages) == 0 {
// We got no messages. If we got a partial trailing message then we need to ask for more data.
// Otherwise we just poll again and wait for one to be produced...
if block.MsgSet.PartialTrailingMessage {
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
// we can't ask for more data, we've hit the configured limit
child.sendError(ErrMessageTooLarge)
child.offset++ // skip this one so we can keep processing future messages
} else {
child.fetchSize *= 2
if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
child.fetchSize = child.conf.Consumer.Fetch.Max
}
}
}
return nil, nil
}
// we got messages, reset our fetch size in case it was increased for a previous request
child.fetchSize = child.conf.Consumer.Fetch.Default
atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
incomplete := false
prelude := true
var messages []*ConsumerMessage
for _, msgBlock := range block.MsgSet.Messages {
for _, msg := range msgBlock.Messages() {
offset := msg.Offset
if msg.Msg.Version >= 1 {
baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
offset += baseOffset
}
if prelude && offset < child.offset {
continue
}
prelude = false
if offset >= child.offset {
messages = append(messages, &ConsumerMessage{
Topic: child.topic,
Partition: child.partition,
Key: msg.Msg.Key,
Value: msg.Msg.Value,
Offset: offset,
Timestamp: msg.Msg.Timestamp,
BlockTimestamp: msgBlock.Msg.Timestamp,
})
child.offset = offset + 1
} else {
incomplete = true
}
}
}
if incomplete || len(messages) == 0 {
return nil, ErrIncompleteResponse
}
return messages, nil
}
// brokerConsumer
type brokerConsumer struct {
consumer *consumer
broker *Broker
input chan *partitionConsumer
newSubscriptions chan []*partitionConsumer
wait chan none
subscriptions map[*partitionConsumer]none
acks sync.WaitGroup
refs int
}
func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
bc := &brokerConsumer{
consumer: c,
broker: broker,
input: make(chan *partitionConsumer),
newSubscriptions: make(chan []*partitionConsumer),
wait: make(chan none),
subscriptions: make(map[*partitionConsumer]none),
refs: 0,
}
go withRecover(bc.subscriptionManager)
go withRecover(bc.subscriptionConsumer)
return bc
}
func (bc *brokerConsumer) subscriptionManager() {
var buffer []*partitionConsumer
// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
// so the main goroutine can block waiting for work if it currently has none.
for {
if len(buffer) > 0 {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- buffer:
buffer = nil
case bc.wait <- none{}:
}
} else {
select {
case event, ok := <-bc.input:
if !ok {
goto done
}
buffer = append(buffer, event)
case bc.newSubscriptions <- nil:
}
}
}
done:
close(bc.wait)
if len(buffer) > 0 {
bc.newSubscriptions <- buffer
}
close(bc.newSubscriptions)
}
func (bc *brokerConsumer) subscriptionConsumer() {
<-bc.wait // wait for our first piece of work
// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
for newSubscriptions := range bc.newSubscriptions {
bc.updateSubscriptions(newSubscriptions)
if len(bc.subscriptions) == 0 {
// We're about to be shut down or we're about to receive more subscriptions.
// Either way, the signal just hasn't propagated to our goroutine yet.
<-bc.wait
continue
}
response, err := bc.fetchNewMessages()
if err != nil {
Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
bc.abort(err)
return
}
bc.acks.Add(len(bc.subscriptions))
for child := range bc.subscriptions {
child.feeder <- response
}
bc.acks.Wait()
bc.handleResponses()
}
}
func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
for _, child := range newSubscriptions {
bc.subscriptions[child] = none{}
Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
}
for child := range bc.subscriptions {
select {
case <-child.dying:
Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
close(child.trigger)
delete(bc.subscriptions, child)
default:
break
}
}
}
func (bc *brokerConsumer) handleResponses() {
// handles the response codes left for us by our subscriptions, and abandons ones that have been closed
for child := range bc.subscriptions {
result := child.responseResult
child.responseResult = nil
switch result {
case nil:
break
case errTimedOut:
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
bc.broker.ID(), child.topic, child.partition)
delete(bc.subscriptions, child)
case ErrOffsetOutOfRange:
// there's no point in retrying this; it will just fail the same way again,
// so shut it down and force the user to choose what to do
child.sendError(result)
Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
close(child.trigger)
delete(bc.subscriptions, child)
case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
// not an error, but does need redispatching
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
default:
// dunno, tell the user and try redispatching
child.sendError(result)
Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
bc.broker.ID(), child.topic, child.partition, result)
child.trigger <- none{}
delete(bc.subscriptions, child)
}
}
}
func (bc *brokerConsumer) abort(err error) {
bc.consumer.abandonBrokerConsumer(bc)
_ = bc.broker.Close() // we don't care about the error this might return, we already have one
for child := range bc.subscriptions {
child.sendError(err)
child.trigger <- none{}
}
for newSubscriptions := range bc.newSubscriptions {
if len(newSubscriptions) == 0 {
<-bc.wait
continue
}
for _, child := range newSubscriptions {
child.sendError(err)
child.trigger <- none{}
}
}
}
func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
request := &FetchRequest{
MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
request.Version = 2
}
if bc.consumer.conf.Version.IsAtLeast(V0_10_1_0) {
request.Version = 3
request.MaxBytes = MaxResponseSize
}
for child := range bc.subscriptions {
request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
}
return bc.broker.Fetch(request)
}
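The buffer/nil-batch dance in subscriptionManager above is a reusable Go idiom: a collector goroutine accepts work at any time and hands a worker either a whole batch or nil between cycles, with a separate channel the worker can block on when idle. A minimal, self-contained sketch of the same pattern follows; the names (collect, batches, wait) are illustrative and not part of sarama.

package main

import "fmt"

type none struct{}

// collect mirrors subscriptionManager: it buffers items arriving on in,
// offers the worker either the whole batch or nil between work cycles, and
// lets an idle worker block on wait until there is something to do.
func collect(in <-chan int, batches chan<- []int, wait chan<- none) {
	var buffer []int
	for {
		if len(buffer) > 0 {
			select {
			case item, ok := <-in:
				if !ok {
					goto done
				}
				buffer = append(buffer, item)
			case batches <- buffer:
				buffer = nil
			case wait <- none{}:
			}
		} else {
			select {
			case item, ok := <-in:
				if !ok {
					goto done
				}
				buffer = append(buffer, item)
			case batches <- nil:
			}
		}
	}
done:
	close(wait) // unblock any idle worker before draining
	if len(buffer) > 0 {
		batches <- buffer
	}
	close(batches)
}

func main() {
	in := make(chan int)
	batches := make(chan []int)
	wait := make(chan none)
	go collect(in, batches, wait)

	go func() {
		for i := 1; i <= 3; i++ {
			in <- i
		}
		close(in)
	}()

	for batch := range batches {
		if batch == nil {
			<-wait // nothing buffered; block until the collector has work
			continue
		}
		fmt.Println("processing batch:", batch)
	}
}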


@ -0,0 +1,94 @@
package sarama
type ConsumerGroupMemberMetadata struct {
Version int16
Topics []string
UserData []byte
}
func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putStringArray(m.Topics); err != nil {
return err
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
if m.Topics, err = pd.getStringArray(); err != nil {
return
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}
type ConsumerGroupMemberAssignment struct {
Version int16
Topics map[string][]int32
UserData []byte
}
func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
pe.putInt16(m.Version)
if err := pe.putArrayLength(len(m.Topics)); err != nil {
return err
}
for topic, partitions := range m.Topics {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putInt32Array(partitions); err != nil {
return err
}
}
if err := pe.putBytes(m.UserData); err != nil {
return err
}
return nil
}
func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
if m.Version, err = pd.getInt16(); err != nil {
return
}
var topicLen int
if topicLen, err = pd.getArrayLength(); err != nil {
return
}
m.Topics = make(map[string][]int32, topicLen)
for i := 0; i < topicLen; i++ {
var topic string
if topic, err = pd.getString(); err != nil {
return
}
if m.Topics[topic], err = pd.getInt32Array(); err != nil {
return
}
}
if m.UserData, err = pd.getBytes(); err != nil {
return
}
return nil
}


@ -0,0 +1,26 @@
package sarama
type ConsumerMetadataRequest struct {
ConsumerGroup string
}
func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
return pe.putString(r.ConsumerGroup)
}
func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
r.ConsumerGroup, err = pd.getString()
return err
}
func (r *ConsumerMetadataRequest) key() int16 {
return 10
}
func (r *ConsumerMetadataRequest) version() int16 {
return 0
}
func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
return V0_8_2_0
}


@ -0,0 +1,85 @@
package sarama
import (
"net"
"strconv"
)
type ConsumerMetadataResponse struct {
Err KError
Coordinator *Broker
CoordinatorID int32 // deprecated: use Coordinator.ID()
CoordinatorHost string // deprecated: use Coordinator.Addr()
CoordinatorPort int32 // deprecated: use Coordinator.Addr()
}
func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(tmp)
coordinator := new(Broker)
if err := coordinator.decode(pd); err != nil {
return err
}
if coordinator.addr == ":0" {
return nil
}
r.Coordinator = coordinator
// this can all go away in 2.0, but we have to fill in deprecated fields to maintain
// backwards compatibility
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
r.CoordinatorID = r.Coordinator.ID()
r.CoordinatorHost = host
r.CoordinatorPort = int32(port)
return nil
}
func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if r.Coordinator != nil {
host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
if err != nil {
return err
}
port, err := strconv.ParseInt(portstr, 10, 32)
if err != nil {
return err
}
pe.putInt32(r.Coordinator.ID())
if err := pe.putString(host); err != nil {
return err
}
pe.putInt32(int32(port))
return nil
}
pe.putInt32(r.CoordinatorID)
if err := pe.putString(r.CoordinatorHost); err != nil {
return err
}
pe.putInt32(r.CoordinatorPort)
return nil
}
func (r *ConsumerMetadataResponse) key() int16 {
return 10
}
func (r *ConsumerMetadataResponse) version() int16 {
return 0
}
func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
return V0_8_2_0
}

35
vendor/github.com/Shopify/sarama/crc32_field.go generated vendored Normal file

@ -0,0 +1,35 @@
package sarama
import (
"encoding/binary"
"hash/crc32"
)
// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
type crc32Field struct {
startOffset int
}
func (c *crc32Field) saveOffset(in int) {
c.startOffset = in
}
func (c *crc32Field) reserveLength() int {
return 4
}
func (c *crc32Field) run(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
return nil
}
func (c *crc32Field) check(curOffset int, buf []byte) error {
crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
return PacketDecodingError{"CRC didn't match"}
}
return nil
}
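The reserve-then-backfill trick crc32Field performs through the push encoder/decoder machinery can be shown in isolation. A hedged, standalone sketch, independent of sarama's internal interfaces: reserve four bytes, write the payload, then fill the reserved space with the checksum of everything after it.

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	payload := []byte("kafka message body")

	// Reserve 4 bytes up front for the checksum, then append the payload.
	buf := make([]byte, 4, 4+len(payload))
	buf = append(buf, payload...)

	// Backfill the reserved space with the CRC of everything after it,
	// mirroring what crc32Field.run does with startOffset = 0.
	crc := crc32.ChecksumIEEE(buf[4:])
	binary.BigEndian.PutUint32(buf[:4], crc)

	fmt.Printf("crc=%08x payload=%q\n", crc, buf[4:])
}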


@ -0,0 +1,30 @@
package sarama
type DescribeGroupsRequest struct {
Groups []string
}
func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
return pe.putStringArray(r.Groups)
}
func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
r.Groups, err = pd.getStringArray()
return
}
func (r *DescribeGroupsRequest) key() int16 {
return 15
}
func (r *DescribeGroupsRequest) version() int16 {
return 0
}
func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}
func (r *DescribeGroupsRequest) AddGroup(group string) {
r.Groups = append(r.Groups, group)
}


@ -0,0 +1,187 @@
package sarama
type DescribeGroupsResponse struct {
Groups []*GroupDescription
}
func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Groups)); err != nil {
return err
}
for _, groupDescription := range r.Groups {
if err := groupDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Groups = make([]*GroupDescription, n)
for i := 0; i < n; i++ {
r.Groups[i] = new(GroupDescription)
if err := r.Groups[i].decode(pd); err != nil {
return err
}
}
return nil
}
func (r *DescribeGroupsResponse) key() int16 {
return 15
}
func (r *DescribeGroupsResponse) version() int16 {
return 0
}
func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}
type GroupDescription struct {
Err KError
GroupId string
State string
ProtocolType string
Protocol string
Members map[string]*GroupMemberDescription
}
func (gd *GroupDescription) encode(pe packetEncoder) error {
pe.putInt16(int16(gd.Err))
if err := pe.putString(gd.GroupId); err != nil {
return err
}
if err := pe.putString(gd.State); err != nil {
return err
}
if err := pe.putString(gd.ProtocolType); err != nil {
return err
}
if err := pe.putString(gd.Protocol); err != nil {
return err
}
if err := pe.putArrayLength(len(gd.Members)); err != nil {
return err
}
for memberId, groupMemberDescription := range gd.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := groupMemberDescription.encode(pe); err != nil {
return err
}
}
return nil
}
func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
gd.Err = KError(kerr)
if gd.GroupId, err = pd.getString(); err != nil {
return
}
if gd.State, err = pd.getString(); err != nil {
return
}
if gd.ProtocolType, err = pd.getString(); err != nil {
return
}
if gd.Protocol, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
gd.Members = make(map[string]*GroupMemberDescription)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
gd.Members[memberId] = new(GroupMemberDescription)
if err := gd.Members[memberId].decode(pd); err != nil {
return err
}
}
return nil
}
type GroupMemberDescription struct {
ClientId string
ClientHost string
MemberMetadata []byte
MemberAssignment []byte
}
func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
if err := pe.putString(gmd.ClientId); err != nil {
return err
}
if err := pe.putString(gmd.ClientHost); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberMetadata); err != nil {
return err
}
if err := pe.putBytes(gmd.MemberAssignment); err != nil {
return err
}
return nil
}
func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
if gmd.ClientId, err = pd.getString(); err != nil {
return
}
if gmd.ClientHost, err = pd.getString(); err != nil {
return
}
if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
return
}
if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
return
}
return nil
}
func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
assignment := new(ConsumerGroupMemberAssignment)
err := decode(gmd.MemberAssignment, assignment)
return assignment, err
}
func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
metadata := new(ConsumerGroupMemberMetadata)
err := decode(gmd.MemberMetadata, metadata)
return metadata, err
}

89
vendor/github.com/Shopify/sarama/encoder_decoder.go generated vendored Normal file

@ -0,0 +1,89 @@
package sarama
import (
"fmt"
"github.com/rcrowley/go-metrics"
)
// Encoder is the interface that wraps the basic Encode method.
// Anything implementing Encoder can be turned into bytes using Kafka's encoding rules.
type encoder interface {
encode(pe packetEncoder) error
}
// Encode takes an Encoder and turns it into bytes while potentially recording metrics.
func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
if e == nil {
return nil, nil
}
var prepEnc prepEncoder
var realEnc realEncoder
err := e.encode(&prepEnc)
if err != nil {
return nil, err
}
if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
}
realEnc.raw = make([]byte, prepEnc.length)
realEnc.registry = metricRegistry
err = e.encode(&realEnc)
if err != nil {
return nil, err
}
return realEnc.raw, nil
}
// Decoder is the interface that wraps the basic Decode method.
// Anything implementing Decoder can be extracted from bytes using Kafka's encoding rules.
type decoder interface {
decode(pd packetDecoder) error
}
type versionedDecoder interface {
decode(pd packetDecoder, version int16) error
}
// Decode takes bytes and a Decoder and fills the fields of the decoder from the bytes,
// interpreted using Kafka's encoding rules.
func decode(buf []byte, in decoder) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}
func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
if buf == nil {
return nil
}
helper := realDecoder{raw: buf}
err := in.decode(&helper, version)
if err != nil {
return err
}
if helper.off != len(buf) {
return PacketDecodingError{"invalid length"}
}
return nil
}
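The encode helper above makes two passes: prepEncoder only measures, then realEncoder writes into an exactly-sized buffer, so there is never a reallocation mid-encode. A toy, self-contained illustration of that measure-then-fill approach; the sizer and writer types here are invented for the example, not sarama's actual encoders.

package main

import (
	"encoding/binary"
	"fmt"
)

// sizer is the measuring pass: it only accumulates how many bytes
// the real encoding will need.
type sizer struct{ n int }

func (s *sizer) putInt32(_ int32)    { s.n += 4 }
func (s *sizer) putString(v string)  { s.n += 2 + len(v) }

// writer is the real pass: it fills an exactly-sized buffer.
type writer struct {
	raw []byte
	off int
}

func (w *writer) putInt32(v int32) {
	binary.BigEndian.PutUint32(w.raw[w.off:], uint32(v))
	w.off += 4
}

func (w *writer) putString(v string) {
	binary.BigEndian.PutUint16(w.raw[w.off:], uint16(len(v)))
	w.off += 2
	w.off += copy(w.raw[w.off:], v)
}

func main() {
	// "Encode" the same fields twice: once to size, once for real.
	var s sizer
	s.putString("demo-group")
	s.putInt32(42)

	w := &writer{raw: make([]byte, s.n)}
	w.putString("demo-group")
	w.putInt32(42)

	fmt.Printf("need %d bytes, wrote % x\n", s.n, w.raw)
}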

221
vendor/github.com/Shopify/sarama/errors.go generated vendored Normal file

@ -0,0 +1,221 @@
package sarama
import (
"errors"
"fmt"
)
// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
// or otherwise failed to respond.
var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
// ErrClosedClient is the error returned when a method is called on a client that has been closed.
var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
// not contain the expected information.
var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
// (meaning one outside of the range [0...numPartitions-1]).
var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
var ErrNotConnected = errors.New("kafka: broker not connected")
// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
// of the message set.
var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
// ErrShuttingDown is returned when a producer receives a message during shutdown.
var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
type PacketEncodingError struct {
Info string
}
func (err PacketEncodingError) Error() string {
return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
}
// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
// This can be a bad CRC or length field, or any other invalid value.
type PacketDecodingError struct {
Info string
}
func (err PacketDecodingError) Error() string {
return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
}
// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
// when the specified configuration is invalid.
type ConfigurationError string
func (err ConfigurationError) Error() string {
return "kafka: invalid configuration (" + string(err) + ")"
}
// KError is the type of error that can be returned directly by the Kafka broker.
// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
type KError int16
// Numeric error codes returned by the Kafka server.
const (
ErrNoError KError = 0
ErrUnknown KError = -1
ErrOffsetOutOfRange KError = 1
ErrInvalidMessage KError = 2
ErrUnknownTopicOrPartition KError = 3
ErrInvalidMessageSize KError = 4
ErrLeaderNotAvailable KError = 5
ErrNotLeaderForPartition KError = 6
ErrRequestTimedOut KError = 7
ErrBrokerNotAvailable KError = 8
ErrReplicaNotAvailable KError = 9
ErrMessageSizeTooLarge KError = 10
ErrStaleControllerEpochCode KError = 11
ErrOffsetMetadataTooLarge KError = 12
ErrNetworkException KError = 13
ErrOffsetsLoadInProgress KError = 14
ErrConsumerCoordinatorNotAvailable KError = 15
ErrNotCoordinatorForConsumer KError = 16
ErrInvalidTopic KError = 17
ErrMessageSetSizeTooLarge KError = 18
ErrNotEnoughReplicas KError = 19
ErrNotEnoughReplicasAfterAppend KError = 20
ErrInvalidRequiredAcks KError = 21
ErrIllegalGeneration KError = 22
ErrInconsistentGroupProtocol KError = 23
ErrInvalidGroupId KError = 24
ErrUnknownMemberId KError = 25
ErrInvalidSessionTimeout KError = 26
ErrRebalanceInProgress KError = 27
ErrInvalidCommitOffsetSize KError = 28
ErrTopicAuthorizationFailed KError = 29
ErrGroupAuthorizationFailed KError = 30
ErrClusterAuthorizationFailed KError = 31
ErrInvalidTimestamp KError = 32
ErrUnsupportedSASLMechanism KError = 33
ErrIllegalSASLState KError = 34
ErrUnsupportedVersion KError = 35
ErrTopicAlreadyExists KError = 36
ErrInvalidPartitions KError = 37
ErrInvalidReplicationFactor KError = 38
ErrInvalidReplicaAssignment KError = 39
ErrInvalidConfig KError = 40
ErrNotController KError = 41
ErrInvalidRequest KError = 42
ErrUnsupportedForMessageFormat KError = 43
ErrPolicyViolation KError = 44
)
func (err KError) Error() string {
// Error messages stolen/adapted from
// https://kafka.apache.org/protocol#protocol_error_codes
switch err {
case ErrNoError:
return "kafka server: Not an error, why are you printing me?"
case ErrUnknown:
return "kafka server: Unexpected (unknown?) server error."
case ErrOffsetOutOfRange:
return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
case ErrInvalidMessage:
return "kafka server: Message contents does not match its CRC."
case ErrUnknownTopicOrPartition:
return "kafka server: Request was for a topic or partition that does not exist on this broker."
case ErrInvalidMessageSize:
return "kafka server: The message has a negative size."
case ErrLeaderNotAvailable:
return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
case ErrNotLeaderForPartition:
return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
case ErrRequestTimedOut:
return "kafka server: Request exceeded the user-specified time limit in the request."
case ErrBrokerNotAvailable:
return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
case ErrReplicaNotAvailable:
return "kafka server: Replica information not available, one or more brokers are down."
case ErrMessageSizeTooLarge:
return "kafka server: Message was too large, server rejected it to avoid allocation error."
case ErrStaleControllerEpochCode:
return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
case ErrOffsetMetadataTooLarge:
return "kafka server: Specified a string larger than the configured maximum for offset metadata."
case ErrNetworkException:
return "kafka server: The server disconnected before a response was received."
case ErrOffsetsLoadInProgress:
return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
case ErrConsumerCoordinatorNotAvailable:
return "kafka server: Offset's topic has not yet been created."
case ErrNotCoordinatorForConsumer:
return "kafka server: Request was for a consumer group that is not coordinated by this broker."
case ErrInvalidTopic:
return "kafka server: The request attempted to perform an operation on an invalid topic."
case ErrMessageSetSizeTooLarge:
return "kafka server: The request included message batch larger than the configured segment size on the server."
case ErrNotEnoughReplicas:
return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
case ErrNotEnoughReplicasAfterAppend:
return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
case ErrInvalidRequiredAcks:
return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
case ErrIllegalGeneration:
return "kafka server: The provided generation id is not the current generation."
case ErrInconsistentGroupProtocol:
return "kafka server: The provider group protocol type is incompatible with the other members."
case ErrInvalidGroupId:
return "kafka server: The provided group id was empty."
case ErrUnknownMemberId:
return "kafka server: The provided member is not known in the current generation."
case ErrInvalidSessionTimeout:
return "kafka server: The provided session timeout is outside the allowed range."
case ErrRebalanceInProgress:
return "kafka server: A rebalance for the group is in progress. Please re-join the group."
case ErrInvalidCommitOffsetSize:
return "kafka server: The provided commit metadata was too large."
case ErrTopicAuthorizationFailed:
return "kafka server: The client is not authorized to access this topic."
case ErrGroupAuthorizationFailed:
return "kafka server: The client is not authorized to access this group."
case ErrClusterAuthorizationFailed:
return "kafka server: The client is not authorized to send this request type."
case ErrInvalidTimestamp:
return "kafka server: The timestamp of the message is out of acceptable range."
case ErrUnsupportedSASLMechanism:
return "kafka server: The broker does not support the requested SASL mechanism."
case ErrIllegalSASLState:
return "kafka server: Request is not valid given the current SASL state."
case ErrUnsupportedVersion:
return "kafka server: The version of API is not supported."
case ErrTopicAlreadyExists:
return "kafka server: Topic with this name already exists."
case ErrInvalidPartitions:
return "kafka server: Number of partitions is invalid."
case ErrInvalidReplicationFactor:
return "kafka server: Replication-factor is invalid."
case ErrInvalidReplicaAssignment:
return "kafka server: Replica assignment is invalid."
case ErrInvalidConfig:
return "kafka server: Configuration is invalid."
case ErrNotController:
return "kafka server: This is not the correct controller for this cluster."
case ErrInvalidRequest:
return "kafka server: This most likely occurs because of a request being malformed by the client library or the message was sent to an incompatible broker. See the broker logs for more details."
case ErrUnsupportedForMessageFormat:
return "kafka server: The requested operation is not supported by the message format version."
case ErrPolicyViolation:
return "kafka server: Request parameters do not satisfy the configured policy."
}
return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
}
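Since the KError constants above satisfy the error interface, callers can compare errors returned by broker calls against them directly. A small usage sketch; the classification strings are illustrative, not part of the library.

package main

import (
	"fmt"

	sarama "github.com/Shopify/sarama"
)

// classify shows the typical pattern: KError values double as ordinary
// error values, so a plain switch on the returned error works.
func classify(err error) string {
	switch err {
	case sarama.ErrOffsetOutOfRange:
		return "reset the consumer offset and retry"
	case sarama.ErrNotLeaderForPartition, sarama.ErrLeaderNotAvailable:
		return "refresh metadata and retry"
	default:
		return "surface the error to the caller"
	}
}

func main() {
	fmt.Println(classify(sarama.ErrOffsetOutOfRange))
}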

150
vendor/github.com/Shopify/sarama/fetch_request.go generated vendored Normal file

@ -0,0 +1,150 @@
package sarama
type fetchRequestBlock struct {
fetchOffset int64
maxBytes int32
}
func (b *fetchRequestBlock) encode(pe packetEncoder) error {
pe.putInt64(b.fetchOffset)
pe.putInt32(b.maxBytes)
return nil
}
func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
if b.fetchOffset, err = pd.getInt64(); err != nil {
return err
}
if b.maxBytes, err = pd.getInt32(); err != nil {
return err
}
return nil
}
// FetchRequest (API key 1) will fetch Kafka messages. Version 3 introduced the MaxBytes field. See
// https://issues.apache.org/jira/browse/KAFKA-2063 for a discussion of the issues leading up to that. The KIP is at
// https://cwiki.apache.org/confluence/display/KAFKA/KIP-74%3A+Add+Fetch+Response+Size+Limit+in+Bytes
type FetchRequest struct {
MaxWaitTime int32
MinBytes int32
MaxBytes int32
Version int16
blocks map[string]map[int32]*fetchRequestBlock
}
func (r *FetchRequest) encode(pe packetEncoder) (err error) {
pe.putInt32(-1) // replica ID is always -1 for clients
pe.putInt32(r.MaxWaitTime)
pe.putInt32(r.MinBytes)
if r.Version == 3 {
pe.putInt32(r.MaxBytes)
}
err = pe.putArrayLength(len(r.blocks))
if err != nil {
return err
}
for topic, blocks := range r.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(blocks))
if err != nil {
return err
}
for partition, block := range blocks {
pe.putInt32(partition)
err = block.encode(pe)
if err != nil {
return err
}
}
}
return nil
}
func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if _, err = pd.getInt32(); err != nil {
return err
}
if r.MaxWaitTime, err = pd.getInt32(); err != nil {
return err
}
if r.MinBytes, err = pd.getInt32(); err != nil {
return err
}
if r.Version == 3 {
if r.MaxBytes, err = pd.getInt32(); err != nil {
return err
}
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
fetchBlock := &fetchRequestBlock{}
if err = fetchBlock.decode(pd); err != nil {
return err
}
r.blocks[topic][partition] = fetchBlock
}
}
return nil
}
func (r *FetchRequest) key() int16 {
return 1
}
func (r *FetchRequest) version() int16 {
return r.Version
}
func (r *FetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
case 3:
return V0_10_1_0
default:
return minVersion
}
}
func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*fetchRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*fetchRequestBlock)
}
tmp := new(fetchRequestBlock)
tmp.maxBytes = maxBytes
tmp.fetchOffset = fetchOffset
r.blocks[topic][partitionID] = tmp
}
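A short sketch of building a fetch request by hand with AddBlock; the topic, offset, and byte sizes are placeholder values. Version 3 is the variant that, per the doc comment above, requires Kafka 0.10.1+ and unlocks the response-wide MaxBytes limit from KIP-74.

package main

import (
	"fmt"

	sarama "github.com/Shopify/sarama"
)

func main() {
	req := &sarama.FetchRequest{
		MinBytes:    1,
		MaxWaitTime: 250, // milliseconds on the wire
		Version:     3,
		MaxBytes:    1 << 20, // response-wide cap, v3 only
	}
	// Ask for partition 0 of "my-topic" starting at offset 1234,
	// with a 32 KiB per-partition limit.
	req.AddBlock("my-topic", 0, 1234, 32768)
	fmt.Println(req.Version)
}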

210
vendor/github.com/Shopify/sarama/fetch_response.go generated vendored Normal file

@ -0,0 +1,210 @@
package sarama
import "time"
type FetchResponseBlock struct {
Err KError
HighWaterMarkOffset int64
MsgSet MessageSet
}
func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
b.HighWaterMarkOffset, err = pd.getInt64()
if err != nil {
return err
}
msgSetSize, err := pd.getInt32()
if err != nil {
return err
}
msgSetDecoder, err := pd.getSubset(int(msgSetSize))
if err != nil {
return err
}
err = (&b.MsgSet).decode(msgSetDecoder)
return err
}
func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(b.Err))
pe.putInt64(b.HighWaterMarkOffset)
pe.push(&lengthField{})
err = b.MsgSet.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
type FetchResponse struct {
Blocks map[string]map[int32]*FetchResponseBlock
ThrottleTime time.Duration
Version int16 // v1 requires 0.9+, v2 requires 0.10+
}
func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.Version >= 1 {
throttle, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(throttle) * time.Millisecond
}
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(FetchResponseBlock)
err = block.decode(pd)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *FetchResponse) encode(pe packetEncoder) (err error) {
if r.Version >= 1 {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
}
err = pe.putArrayLength(len(r.Blocks))
if err != nil {
return err
}
for topic, partitions := range r.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, block := range partitions {
pe.putInt32(id)
err = block.encode(pe)
if err != nil {
return err
}
}
}
return nil
}
func (r *FetchResponse) key() int16 {
return 1
}
func (r *FetchResponse) version() int16 {
return r.Version
}
func (r *FetchResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
default:
return minVersion
}
}
func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
frb.Err = err
}
func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
}
partitions, ok := r.Blocks[topic]
if !ok {
partitions = make(map[int32]*FetchResponseBlock)
r.Blocks[topic] = partitions
}
frb, ok := partitions[partition]
if !ok {
frb = new(FetchResponseBlock)
partitions[partition] = frb
}
var kb []byte
var vb []byte
if key != nil {
kb, _ = key.Encode()
}
if value != nil {
vb, _ = value.Encode()
}
msg := &Message{Key: kb, Value: vb}
msgBlock := &MessageBlock{Msg: msg, Offset: offset}
frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
}
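AddMessage and AddError exist mainly so tests can assemble canned responses. A quick sketch of that use; the topic name, partitions, and values are placeholders.

package main

import (
	"fmt"

	sarama "github.com/Shopify/sarama"
)

func main() {
	// One good message on partition 0, an error block on partition 1.
	res := new(sarama.FetchResponse)
	res.AddMessage("my-topic", 0, nil, sarama.StringEncoder("hello"), 5)
	res.AddError("my-topic", 1, sarama.ErrOffsetOutOfRange)

	block := res.GetBlock("my-topic", 0)
	fmt.Println(len(block.MsgSet.Messages), block.MsgSet.Messages[0].Offset)
}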

47
vendor/github.com/Shopify/sarama/heartbeat_request.go generated vendored Normal file

@ -0,0 +1,47 @@
package sarama
type HeartbeatRequest struct {
GroupId string
GenerationId int32
MemberId string
}
func (r *HeartbeatRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.GenerationId)
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *HeartbeatRequest) key() int16 {
return 12
}
func (r *HeartbeatRequest) version() int16 {
return 0
}
func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}

32
vendor/github.com/Shopify/sarama/heartbeat_response.go generated vendored Normal file

@ -0,0 +1,32 @@
package sarama
type HeartbeatResponse struct {
Err KError
}
func (r *HeartbeatResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *HeartbeatResponse) key() int16 {
return 12
}
func (r *HeartbeatResponse) version() int16 {
return 0
}
func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

143
vendor/github.com/Shopify/sarama/join_group_request.go generated vendored Normal file

@ -0,0 +1,143 @@
package sarama
type GroupProtocol struct {
Name string
Metadata []byte
}
func (p *GroupProtocol) decode(pd packetDecoder) (err error) {
p.Name, err = pd.getString()
if err != nil {
return err
}
p.Metadata, err = pd.getBytes()
return err
}
func (p *GroupProtocol) encode(pe packetEncoder) (err error) {
if err := pe.putString(p.Name); err != nil {
return err
}
if err := pe.putBytes(p.Metadata); err != nil {
return err
}
return nil
}
type JoinGroupRequest struct {
GroupId string
SessionTimeout int32
MemberId string
ProtocolType string
GroupProtocols map[string][]byte // deprecated; use OrderedGroupProtocols
OrderedGroupProtocols []*GroupProtocol
}
func (r *JoinGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.SessionTimeout)
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putString(r.ProtocolType); err != nil {
return err
}
if len(r.GroupProtocols) > 0 {
if len(r.OrderedGroupProtocols) > 0 {
return PacketEncodingError{"cannot specify both GroupProtocols and OrderedGroupProtocols on JoinGroupRequest"}
}
if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
return err
}
for name, metadata := range r.GroupProtocols {
if err := pe.putString(name); err != nil {
return err
}
if err := pe.putBytes(metadata); err != nil {
return err
}
}
} else {
if err := pe.putArrayLength(len(r.OrderedGroupProtocols)); err != nil {
return err
}
for _, protocol := range r.OrderedGroupProtocols {
if err := protocol.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.SessionTimeout, err = pd.getInt32(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
if r.ProtocolType, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupProtocols = make(map[string][]byte)
for i := 0; i < n; i++ {
protocol := &GroupProtocol{}
if err := protocol.decode(pd); err != nil {
return err
}
r.GroupProtocols[protocol.Name] = protocol.Metadata
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, protocol)
}
return nil
}
func (r *JoinGroupRequest) key() int16 {
return 11
}
func (r *JoinGroupRequest) version() int16 {
return 0
}
func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}
func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
r.OrderedGroupProtocols = append(r.OrderedGroupProtocols, &GroupProtocol{
Name: name,
Metadata: metadata,
})
}
func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
bin, err := encode(metadata, nil)
if err != nil {
return err
}
r.AddGroupProtocol(name, bin)
return nil
}
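A brief sketch of the intended call pattern, using AddGroupProtocolMetadata so the ConsumerGroupMemberMetadata encoding is handled for you; the group, protocol, and topic names are placeholders.

package main

import (
	"fmt"

	sarama "github.com/Shopify/sarama"
)

func main() {
	req := &sarama.JoinGroupRequest{
		GroupId:        "example-group",
		SessionTimeout: 30000, // milliseconds
		ProtocolType:   "consumer",
	}
	// Encodes the metadata and appends it to OrderedGroupProtocols.
	meta := &sarama.ConsumerGroupMemberMetadata{Topics: []string{"my-topic"}}
	if err := req.AddGroupProtocolMetadata("range", meta); err != nil {
		panic(err)
	}
	fmt.Println(len(req.OrderedGroupProtocols)) // 1
}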

115
vendor/github.com/Shopify/sarama/join_group_response.go generated vendored Normal file

@ -0,0 +1,115 @@
package sarama
type JoinGroupResponse struct {
Err KError
GenerationId int32
GroupProtocol string
LeaderId string
MemberId string
Members map[string][]byte
}
func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
for id, bin := range r.Members {
meta := new(ConsumerGroupMemberMetadata)
if err := decode(bin, meta); err != nil {
return nil, err
}
members[id] = *meta
}
return members, nil
}
func (r *JoinGroupResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
pe.putInt32(r.GenerationId)
if err := pe.putString(r.GroupProtocol); err != nil {
return err
}
if err := pe.putString(r.LeaderId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putArrayLength(len(r.Members)); err != nil {
return err
}
for memberId, memberMetadata := range r.Members {
if err := pe.putString(memberId); err != nil {
return err
}
if err := pe.putBytes(memberMetadata); err != nil {
return err
}
}
return nil
}
func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.GroupProtocol, err = pd.getString(); err != nil {
return
}
if r.LeaderId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.Members = make(map[string][]byte)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
memberMetadata, err := pd.getBytes()
if err != nil {
return err
}
r.Members[memberId] = memberMetadata
}
return nil
}
func (r *JoinGroupResponse) key() int16 {
return 11
}
func (r *JoinGroupResponse) version() int16 {
return 0
}
func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -0,0 +1,40 @@
package sarama
type LeaveGroupRequest struct {
GroupId string
MemberId string
}
func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
if err := pe.putString(r.MemberId); err != nil {
return err
}
return nil
}
func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
return nil
}
func (r *LeaveGroupRequest) key() int16 {
return 13
}
func (r *LeaveGroupRequest) version() int16 {
return 0
}
func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -0,0 +1,32 @@
package sarama
type LeaveGroupResponse struct {
Err KError
}
func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return nil
}
func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
return nil
}
func (r *LeaveGroupResponse) key() int16 {
return 13
}
func (r *LeaveGroupResponse) version() int16 {
return 0
}
func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

29
vendor/github.com/Shopify/sarama/length_field.go generated vendored Normal file

@ -0,0 +1,29 @@
package sarama
import "encoding/binary"
// lengthField implements the pushEncoder and pushDecoder interfaces for calculating 4-byte lengths.
type lengthField struct {
startOffset int
}
func (l *lengthField) saveOffset(in int) {
l.startOffset = in
}
func (l *lengthField) reserveLength() int {
return 4
}
func (l *lengthField) run(curOffset int, buf []byte) error {
binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
return nil
}
func (l *lengthField) check(curOffset int, buf []byte) error {
if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
return PacketDecodingError{"length field invalid"}
}
return nil
}


@ -0,0 +1,24 @@
package sarama
type ListGroupsRequest struct {
}
func (r *ListGroupsRequest) encode(pe packetEncoder) error {
return nil
}
func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
return nil
}
func (r *ListGroupsRequest) key() int16 {
return 16
}
func (r *ListGroupsRequest) version() int16 {
return 0
}
func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}


@ -0,0 +1,69 @@
package sarama
type ListGroupsResponse struct {
Err KError
Groups map[string]string
}
func (r *ListGroupsResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
if err := pe.putArrayLength(len(r.Groups)); err != nil {
return err
}
for groupId, protocolType := range r.Groups {
if err := pe.putString(groupId); err != nil {
return err
}
if err := pe.putString(protocolType); err != nil {
return err
}
}
return nil
}
func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.Groups = make(map[string]string)
for i := 0; i < n; i++ {
groupId, err := pd.getString()
if err != nil {
return err
}
protocolType, err := pd.getString()
if err != nil {
return err
}
r.Groups[groupId] = protocolType
}
return nil
}
func (r *ListGroupsResponse) key() int16 {
return 16
}
func (r *ListGroupsResponse) version() int16 {
return 0
}
func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

212
vendor/github.com/Shopify/sarama/message.go generated vendored Normal file

@ -0,0 +1,212 @@
package sarama
import (
"bytes"
"compress/gzip"
"fmt"
"io/ioutil"
"time"
"github.com/eapache/go-xerial-snappy"
"github.com/pierrec/lz4"
)
// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
type CompressionCodec int8
// only the last two bits are really used
const compressionCodecMask int8 = 0x03
const (
CompressionNone CompressionCodec = 0
CompressionGZIP CompressionCodec = 1
CompressionSnappy CompressionCodec = 2
CompressionLZ4 CompressionCodec = 3
)
type Message struct {
Codec CompressionCodec // codec used to compress the message contents
Key []byte // the message key, may be nil
Value []byte // the message contents
Set *MessageSet // the message set a message might wrap
Version int8 // v1 requires Kafka 0.10
Timestamp time.Time // the timestamp of the message (version 1+ only)
compressedCache []byte
compressedSize int // used for computing the compression ratio metrics
}
func (m *Message) encode(pe packetEncoder) error {
pe.push(&crc32Field{})
pe.putInt8(m.Version)
attributes := int8(m.Codec) & compressionCodecMask
pe.putInt8(attributes)
if m.Version >= 1 {
timestamp := int64(-1)
if !m.Timestamp.Before(time.Unix(0, 0)) {
timestamp = m.Timestamp.UnixNano() / int64(time.Millisecond)
} else if !m.Timestamp.IsZero() {
return PacketEncodingError{fmt.Sprintf("invalid timestamp (%v)", m.Timestamp)}
}
pe.putInt64(timestamp)
}
err := pe.putBytes(m.Key)
if err != nil {
return err
}
var payload []byte
if m.compressedCache != nil {
payload = m.compressedCache
m.compressedCache = nil
} else if m.Value != nil {
switch m.Codec {
case CompressionNone:
payload = m.Value
case CompressionGZIP:
var buf bytes.Buffer
writer := gzip.NewWriter(&buf)
if _, err = writer.Write(m.Value); err != nil {
return err
}
if err = writer.Close(); err != nil {
return err
}
m.compressedCache = buf.Bytes()
payload = m.compressedCache
case CompressionSnappy:
tmp := snappy.Encode(m.Value)
m.compressedCache = tmp
payload = m.compressedCache
case CompressionLZ4:
var buf bytes.Buffer
writer := lz4.NewWriter(&buf)
if _, err = writer.Write(m.Value); err != nil {
return err
}
if err = writer.Close(); err != nil {
return err
}
m.compressedCache = buf.Bytes()
payload = m.compressedCache
default:
return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
}
// Keep in mind the compressed payload size for metric gathering
m.compressedSize = len(payload)
}
if err = pe.putBytes(payload); err != nil {
return err
}
return pe.pop()
}
func (m *Message) decode(pd packetDecoder) (err error) {
err = pd.push(&crc32Field{})
if err != nil {
return err
}
m.Version, err = pd.getInt8()
if err != nil {
return err
}
attribute, err := pd.getInt8()
if err != nil {
return err
}
m.Codec = CompressionCodec(attribute & compressionCodecMask)
if m.Version >= 1 {
millis, err := pd.getInt64()
if err != nil {
return err
}
// negative timestamps are invalid; in these cases we should return
// a zero time
timestamp := time.Time{}
if millis >= 0 {
timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
m.Timestamp = timestamp
}
m.Key, err = pd.getBytes()
if err != nil {
return err
}
m.Value, err = pd.getBytes()
if err != nil {
return err
}
// Required for deep equal assertion during tests but might be useful
// for future metrics about the compression ratio in fetch requests
m.compressedSize = len(m.Value)
switch m.Codec {
case CompressionNone:
// nothing to do
case CompressionGZIP:
if m.Value == nil {
break
}
reader, err := gzip.NewReader(bytes.NewReader(m.Value))
if err != nil {
return err
}
if m.Value, err = ioutil.ReadAll(reader); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
case CompressionSnappy:
if m.Value == nil {
break
}
if m.Value, err = snappy.Decode(m.Value); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
case CompressionLZ4:
if m.Value == nil {
break
}
reader := lz4.NewReader(bytes.NewReader(m.Value))
if m.Value, err = ioutil.ReadAll(reader); err != nil {
return err
}
if err := m.decodeSet(); err != nil {
return err
}
default:
return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
}
return pd.pop()
}
// decodes a message set from a previously encoded bulk-message
func (m *Message) decodeSet() (err error) {
pd := realDecoder{raw: m.Value}
m.Set = &MessageSet{}
return m.Set.decode(&pd)
}

89
vendor/github.com/Shopify/sarama/message_set.go generated vendored Normal file

@ -0,0 +1,89 @@
package sarama
type MessageBlock struct {
Offset int64
Msg *Message
}
// Messages is a convenience helper which returns either all the
// messages that are wrapped in this block (if it carries a compressed
// message set) or the block itself as a single-element slice
func (msb *MessageBlock) Messages() []*MessageBlock {
if msb.Msg.Set != nil {
return msb.Msg.Set.Messages
}
return []*MessageBlock{msb}
}
func (msb *MessageBlock) encode(pe packetEncoder) error {
pe.putInt64(msb.Offset)
pe.push(&lengthField{})
err := msb.Msg.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
if msb.Offset, err = pd.getInt64(); err != nil {
return err
}
if err = pd.push(&lengthField{}); err != nil {
return err
}
msb.Msg = new(Message)
if err = msb.Msg.decode(pd); err != nil {
return err
}
if err = pd.pop(); err != nil {
return err
}
return nil
}
type MessageSet struct {
PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
Messages []*MessageBlock
}
func (ms *MessageSet) encode(pe packetEncoder) error {
for i := range ms.Messages {
err := ms.Messages[i].encode(pe)
if err != nil {
return err
}
}
return nil
}
func (ms *MessageSet) decode(pd packetDecoder) (err error) {
ms.Messages = nil
for pd.remaining() > 0 {
msb := new(MessageBlock)
err = msb.decode(pd)
switch err {
case nil:
ms.Messages = append(ms.Messages, msb)
case ErrInsufficientData:
// As an optimization the server is allowed to return a partial message at the
// end of the message set. Clients should handle this case. So we just ignore such things.
ms.PartialTrailingMessage = true
return nil
default:
return err
}
}
return nil
}
func (ms *MessageSet) addMessage(msg *Message) {
block := new(MessageBlock)
block.Msg = msg
ms.Messages = append(ms.Messages, block)
}
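Because MessageBlock and Message expose their fields, the flattening behaviour of Messages is easy to see in a standalone sketch; the offsets and values are illustrative.

package main

import (
	"fmt"

	sarama "github.com/Shopify/sarama"
)

func main() {
	// An outer block whose Msg wraps a (conceptually compressed) set
	// of two inner messages.
	inner := &sarama.MessageSet{Messages: []*sarama.MessageBlock{
		{Offset: 10, Msg: &sarama.Message{Value: []byte("a")}},
		{Offset: 11, Msg: &sarama.Message{Value: []byte("b")}},
	}}
	outer := &sarama.MessageBlock{Offset: 11, Msg: &sarama.Message{Set: inner}}

	// Messages flattens to the wrapped blocks; a plain block would
	// return itself as a single-element slice.
	for _, mb := range outer.Messages() {
		fmt.Println(mb.Offset, string(mb.Msg.Value))
	}
}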

52
vendor/github.com/Shopify/sarama/metadata_request.go generated vendored Normal file

@ -0,0 +1,52 @@
package sarama
type MetadataRequest struct {
Topics []string
}
func (r *MetadataRequest) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for i := range r.Topics {
err = pe.putString(r.Topics[i])
if err != nil {
return err
}
}
return nil
}
func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.Topics = make([]string, topicCount)
for i := range r.Topics {
topic, err := pd.getString()
if err != nil {
return err
}
r.Topics[i] = topic
}
return nil
}
func (r *MetadataRequest) key() int16 {
return 3
}
func (r *MetadataRequest) version() int16 {
return 0
}
func (r *MetadataRequest) requiredVersion() KafkaVersion {
return minVersion
}

239
vendor/github.com/Shopify/sarama/metadata_response.go generated vendored Normal file

@ -0,0 +1,239 @@
package sarama
type PartitionMetadata struct {
Err KError
ID int32
Leader int32
Replicas []int32
Isr []int32
}
func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
pm.Err = KError(tmp)
pm.ID, err = pd.getInt32()
if err != nil {
return err
}
pm.Leader, err = pd.getInt32()
if err != nil {
return err
}
pm.Replicas, err = pd.getInt32Array()
if err != nil {
return err
}
pm.Isr, err = pd.getInt32Array()
if err != nil {
return err
}
return nil
}
func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(pm.Err))
pe.putInt32(pm.ID)
pe.putInt32(pm.Leader)
err = pe.putInt32Array(pm.Replicas)
if err != nil {
return err
}
err = pe.putInt32Array(pm.Isr)
if err != nil {
return err
}
return nil
}
type TopicMetadata struct {
Err KError
Name string
Partitions []*PartitionMetadata
}
func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
tm.Err = KError(tmp)
tm.Name, err = pd.getString()
if err != nil {
return err
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
tm.Partitions = make([]*PartitionMetadata, n)
for i := 0; i < n; i++ {
tm.Partitions[i] = new(PartitionMetadata)
err = tm.Partitions[i].decode(pd)
if err != nil {
return err
}
}
return nil
}
func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
pe.putInt16(int16(tm.Err))
err = pe.putString(tm.Name)
if err != nil {
return err
}
err = pe.putArrayLength(len(tm.Partitions))
if err != nil {
return err
}
for _, pm := range tm.Partitions {
err = pm.encode(pe)
if err != nil {
return err
}
}
return nil
}
type MetadataResponse struct {
Brokers []*Broker
Topics []*TopicMetadata
}
func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
n, err := pd.getArrayLength()
if err != nil {
return err
}
r.Brokers = make([]*Broker, n)
for i := 0; i < n; i++ {
r.Brokers[i] = new(Broker)
err = r.Brokers[i].decode(pd)
if err != nil {
return err
}
}
n, err = pd.getArrayLength()
if err != nil {
return err
}
r.Topics = make([]*TopicMetadata, n)
for i := 0; i < n; i++ {
r.Topics[i] = new(TopicMetadata)
err = r.Topics[i].decode(pd)
if err != nil {
return err
}
}
return nil
}
func (r *MetadataResponse) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(r.Brokers))
if err != nil {
return err
}
for _, broker := range r.Brokers {
err = broker.encode(pe)
if err != nil {
return err
}
}
err = pe.putArrayLength(len(r.Topics))
if err != nil {
return err
}
for _, tm := range r.Topics {
err = tm.encode(pe)
if err != nil {
return err
}
}
return nil
}
func (r *MetadataResponse) key() int16 {
return 3
}
func (r *MetadataResponse) version() int16 {
return 0
}
func (r *MetadataResponse) requiredVersion() KafkaVersion {
return minVersion
}
// testing API
func (r *MetadataResponse) AddBroker(addr string, id int32) {
r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
}
func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
var tmatch *TopicMetadata
for _, tm := range r.Topics {
if tm.Name == topic {
tmatch = tm
goto foundTopic
}
}
tmatch = new(TopicMetadata)
tmatch.Name = topic
r.Topics = append(r.Topics, tmatch)
foundTopic:
tmatch.Err = err
return tmatch
}
func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
tmatch := r.AddTopic(topic, ErrNoError)
var pmatch *PartitionMetadata
for _, pm := range tmatch.Partitions {
if pm.ID == partition {
pmatch = pm
goto foundPartition
}
}
pmatch = new(PartitionMetadata)
pmatch.ID = partition
tmatch.Partitions = append(tmatch.Partitions, pmatch)
foundPartition:
pmatch.Leader = brokerID
pmatch.Replicas = replicas
pmatch.Isr = isr
pmatch.Err = err
}
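A sketch of this testing API in use, wiring up one broker and one topic/partition; the address, topic name, and IDs are placeholders.

package main

import (
	"fmt"

	sarama "github.com/Shopify/sarama"
)

func main() {
	res := new(sarama.MetadataResponse)
	res.AddBroker("localhost:9092", 0)
	// Broker 0 leads partition 0, and is the sole replica and ISR member.
	res.AddTopicPartition("my-topic", 0, 0, []int32{0}, []int32{0}, sarama.ErrNoError)

	fmt.Println(res.Topics[0].Name, len(res.Topics[0].Partitions))
}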

51
vendor/github.com/Shopify/sarama/metrics.go generated vendored Normal file

@ -0,0 +1,51 @@
package sarama
import (
"fmt"
"strings"
"github.com/rcrowley/go-metrics"
)
// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
const (
metricsReservoirSize = 1028
metricsAlphaFactor = 0.015
)
func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
return r.GetOrRegister(name, func() metrics.Histogram {
return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
}).(metrics.Histogram)
}
func getMetricNameForBroker(name string, broker *Broker) string {
// Use the broker ID like the Java client does, as it contains no '.' or ':' characters that
// could be interpreted as special characters by a monitoring tool (e.g. Graphite)
return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
}
func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
}
func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
}
func getMetricNameForTopic(name string, topic string) string {
// Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
// cf. KAFKA-1902 and KAFKA-2337
return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
}
func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
}
func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
}
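The same reservoir can be set up directly against go-metrics, which is what getOrRegisterHistogram does internally; a small standalone sketch with an assumed metric name and sample values.

package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	// Same reservoir parameters the comment above describes:
	// 1028 samples, alpha 0.015.
	h := r.GetOrRegister("request-size", func() metrics.Histogram {
		return metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
	}).(metrics.Histogram)

	for _, v := range []int64{100, 250, 900} {
		h.Update(v)
	}
	fmt.Println(h.Count(), h.Max())
}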

324
vendor/github.com/Shopify/sarama/mockbroker.go generated vendored Normal file

@ -0,0 +1,324 @@
package sarama
import (
"bytes"
"encoding/binary"
"fmt"
"io"
"net"
"reflect"
"strconv"
"sync"
"time"
"github.com/davecgh/go-spew/spew"
)
const (
expectationTimeout = 500 * time.Millisecond
)
type requestHandlerFunc func(req *request) (res encoder)
// RequestNotifierFunc is invoked when a mock broker processes a request successfully
// and provides the number of bytes read and written.
type RequestNotifierFunc func(bytesRead, bytesWritten int)
// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
// to facilitate testing of higher level or specialized consumers and producers
// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
// but rather provides a facility to do that. It takes care of the TCP
// transport, request unmarshaling, and response marshaling, and leaves it
// to the test writer to program MockBroker behaviour that is correct
// according to the Kafka API protocol.
//
// MockBroker is implemented as a TCP server listening on a kernel-selected
// localhost port that can accept many connections. It reads Kafka requests
// from that connection and returns responses programmed by the SetHandlerByMap
// function. If a MockBroker receives a request that it has no programmed
// response for, then it returns nothing and the request times out.
//
// A set of MockResponse builders to define mappings used by MockBroker is
// provided by Sarama. But users can develop MockResponses of their own and use
// them along with or instead of the standard ones.
//
// When running tests with MockBroker it is strongly recommended to specify
// a timeout to `go test` so that if the broker hangs waiting for a response,
// the test panics.
//
// It is not necessary to prefix message length or correlation ID to your
// response bytes; the server does that automatically as a convenience.
type MockBroker struct {
brokerID int32
port int32
closing chan none
stopper chan none
expectations chan encoder
listener net.Listener
t TestReporter
latency time.Duration
handler requestHandlerFunc
notifier RequestNotifierFunc
history []RequestResponse
lock sync.Mutex
}
// RequestResponse represents a Request/Response pair processed by MockBroker.
type RequestResponse struct {
Request protocolBody
Response encoder
}
// SetLatency makes broker pause for the specified period every time before
// replying.
func (b *MockBroker) SetLatency(latency time.Duration) {
b.latency = latency
}
// SetHandlerByMap defines mapping of Request types to MockResponses. When a
// request is received by the broker, it looks up the request type in the map
// and uses the found MockResponse instance to generate an appropriate reply.
// If the request type is not found in the map then nothing is sent.
func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
b.setHandler(func(req *request) (res encoder) {
reqTypeName := reflect.TypeOf(req.body).Elem().Name()
mockResponse := handlerMap[reqTypeName]
if mockResponse == nil {
return nil
}
return mockResponse.For(req.body)
})
}
// SetNotifier sets a function that is invoked whenever a request has been
// processed successfully, providing the number of bytes read and written.
func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
b.lock.Lock()
b.notifier = notifier
b.lock.Unlock()
}
// BrokerID returns the broker ID assigned to the broker.
func (b *MockBroker) BrokerID() int32 {
return b.brokerID
}
// History returns a slice of RequestResponse pairs in the order they were
// processed by the broker. Note that in case of multiple connections to the
// broker the order expected by a test can be different from the order recorded
// in the history, unless some synchronization is implemented in the test.
func (b *MockBroker) History() []RequestResponse {
b.lock.Lock()
history := make([]RequestResponse, len(b.history))
copy(history, b.history)
b.lock.Unlock()
return history
}
// Port returns the TCP port number the broker is listening for requests on.
func (b *MockBroker) Port() int32 {
return b.port
}
// Addr returns the broker connection string in the form "<address>:<port>".
func (b *MockBroker) Addr() string {
return b.listener.Addr().String()
}
// Close terminates the broker, blocking until it stops internal goroutines and
// releases all resources.
func (b *MockBroker) Close() {
close(b.expectations)
if len(b.expectations) > 0 {
buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
for e := range b.expectations {
_, _ = buf.WriteString(spew.Sdump(e))
}
b.t.Error(buf.String())
}
close(b.closing)
<-b.stopper
}
// setHandler sets the specified function as the request handler. Whenever
// a mock broker reads a request from the wire it passes the request to the
// function and sends back whatever the handler function returns.
func (b *MockBroker) setHandler(handler requestHandlerFunc) {
b.lock.Lock()
b.handler = handler
b.lock.Unlock()
}
func (b *MockBroker) serverLoop() {
defer close(b.stopper)
var err error
var conn net.Conn
go func() {
<-b.closing
err := b.listener.Close()
if err != nil {
b.t.Error(err)
}
}()
wg := &sync.WaitGroup{}
i := 0
for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
wg.Add(1)
go b.handleRequests(conn, i, wg)
i++
}
wg.Wait()
Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
}
func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
defer wg.Done()
defer func() {
_ = conn.Close()
}()
Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
var err error
abort := make(chan none)
defer close(abort)
go func() {
select {
case <-b.closing:
_ = conn.Close()
case <-abort:
}
}()
resHeader := make([]byte, 8)
for {
req, bytesRead, err := decodeRequest(conn)
if err != nil {
Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
b.serverError(err)
break
}
if b.latency > 0 {
time.Sleep(b.latency)
}
b.lock.Lock()
res := b.handler(req)
b.history = append(b.history, RequestResponse{req.body, res})
b.lock.Unlock()
if res == nil {
Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
continue
}
Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
encodedRes, err := encode(res, nil)
if err != nil {
b.serverError(err)
break
}
if len(encodedRes) == 0 {
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, 0)
}
b.lock.Unlock()
continue
}
binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
if _, err = conn.Write(resHeader); err != nil {
b.serverError(err)
break
}
if _, err = conn.Write(encodedRes); err != nil {
b.serverError(err)
break
}
b.lock.Lock()
if b.notifier != nil {
b.notifier(bytesRead, len(resHeader)+len(encodedRes))
}
b.lock.Unlock()
}
Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
}
func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
select {
case res, ok := <-b.expectations:
if !ok {
return nil
}
return res
case <-time.After(expectationTimeout):
return nil
}
}
func (b *MockBroker) serverError(err error) {
isConnectionClosedError := false
if _, ok := err.(*net.OpError); ok {
isConnectionClosedError = true
} else if err == io.EOF {
isConnectionClosedError = true
} else if err.Error() == "use of closed network connection" {
isConnectionClosedError = true
}
if isConnectionClosedError {
return
}
b.t.Error(err)
}
// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
// test framework and a channel of responses to use. If an error occurs it is
// simply logged to the TestReporter and the broker exits.
func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
return NewMockBrokerAddr(t, brokerID, "localhost:0")
}
// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
// it rather than just some ephemeral port.
func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
var err error
broker := &MockBroker{
closing: make(chan none),
stopper: make(chan none),
t: t,
brokerID: brokerID,
expectations: make(chan encoder, 512),
}
broker.handler = broker.defaultRequestHandler
broker.listener, err = net.Listen("tcp", addr)
if err != nil {
t.Fatal(err)
}
Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
_, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
if err != nil {
t.Fatal(err)
}
tmp, err := strconv.ParseInt(portStr, 10, 32)
if err != nil {
t.Fatal(err)
}
broker.port = int32(tmp)
go broker.serverLoop()
return broker
}
func (b *MockBroker) Returns(e encoder) {
b.expectations <- e
}
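A minimal test sketch (not part of this commit) of the lifecycle described above, assuming a _test.go file inside the package so that NewClient and the MetadataResponse testing API defined elsewhere in this commit are in scope; one canned response is queued via Returns and served by the default handler:

	func TestMockBrokerSketch(t *testing.T) {
		broker := NewMockBroker(t, 1)
		// Close fails the test if queued expectations were never served.
		defer broker.Close()

		metadata := new(MetadataResponse)
		metadata.AddBroker(broker.Addr(), broker.BrokerID())
		broker.Returns(metadata) // consumed by defaultRequestHandler

		client, err := NewClient([]string{broker.Addr()}, nil)
		if err != nil {
			t.Fatal(err)
		}
		_ = client.Close()
	}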

455
vendor/github.com/Shopify/sarama/mockresponses.go generated vendored Normal file
View File

@ -0,0 +1,455 @@
package sarama
import (
"fmt"
)
// TestReporter has methods matching go's testing.T to avoid importing
// `testing` in the main part of the library.
type TestReporter interface {
Error(...interface{})
Errorf(string, ...interface{})
Fatal(...interface{})
Fatalf(string, ...interface{})
}
// MockResponse is a response builder interface; it defines one method that
// allows generating a response based on a request body. MockResponses are used
// to program behavior of MockBroker in tests.
type MockResponse interface {
For(reqBody versionedDecoder) (res encoder)
}
// MockWrapper is a mock response builder that returns a particular concrete
// response regardless of the actual request passed to the `For` method.
type MockWrapper struct {
res encoder
}
func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
return mw.res
}
func NewMockWrapper(res encoder) *MockWrapper {
return &MockWrapper{res: res}
}
// MockSequence is a mock response builder that is created from a sequence of
// concrete responses. Each time a `MockBroker` calls its `For` method,
// the next response from the sequence is returned. When the end of the
// sequence is reached the last element from the sequence is returned.
type MockSequence struct {
responses []MockResponse
}
func NewMockSequence(responses ...interface{}) *MockSequence {
ms := &MockSequence{}
ms.responses = make([]MockResponse, len(responses))
for i, res := range responses {
switch res := res.(type) {
case MockResponse:
ms.responses[i] = res
case encoder:
ms.responses[i] = NewMockWrapper(res)
default:
panic(fmt.Sprintf("Unexpected response type: %T", res))
}
}
return ms
}
func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
res = mc.responses[0].For(reqBody)
if len(mc.responses) > 1 {
mc.responses = mc.responses[1:]
}
return res
}
// MockMetadataResponse is a `MetadataResponse` builder.
type MockMetadataResponse struct {
leaders map[string]map[int32]int32
brokers map[string]int32
t TestReporter
}
func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
return &MockMetadataResponse{
leaders: make(map[string]map[int32]int32),
brokers: make(map[string]int32),
t: t,
}
}
func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
partitions := mmr.leaders[topic]
if partitions == nil {
partitions = make(map[int32]int32)
mmr.leaders[topic] = partitions
}
partitions[partition] = brokerID
return mmr
}
func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
mmr.brokers[addr] = brokerID
return mmr
}
func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
metadataRequest := reqBody.(*MetadataRequest)
metadataResponse := &MetadataResponse{}
for addr, brokerID := range mmr.brokers {
metadataResponse.AddBroker(addr, brokerID)
}
if len(metadataRequest.Topics) == 0 {
for topic, partitions := range mmr.leaders {
for partition, brokerID := range partitions {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
}
}
return metadataResponse
}
for _, topic := range metadataRequest.Topics {
for partition, brokerID := range mmr.leaders[topic] {
metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
}
}
return metadataResponse
}
// MockOffsetResponse is an `OffsetResponse` builder.
type MockOffsetResponse struct {
offsets map[string]map[int32]map[int64]int64
t TestReporter
}
func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
return &MockOffsetResponse{
offsets: make(map[string]map[int32]map[int64]int64),
t: t,
}
}
func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
partitions := mor.offsets[topic]
if partitions == nil {
partitions = make(map[int32]map[int64]int64)
mor.offsets[topic] = partitions
}
times := partitions[partition]
if times == nil {
times = make(map[int64]int64)
partitions[partition] = times
}
times[time] = offset
return mor
}
func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
offsetRequest := reqBody.(*OffsetRequest)
offsetResponse := &OffsetResponse{}
for topic, partitions := range offsetRequest.blocks {
for partition, block := range partitions {
offset := mor.getOffset(topic, partition, block.time)
offsetResponse.AddTopicPartition(topic, partition, offset)
}
}
return offsetResponse
}
func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
partitions := mor.offsets[topic]
if partitions == nil {
mor.t.Errorf("missing topic: %s", topic)
}
times := partitions[partition]
if times == nil {
mor.t.Errorf("missing partition: %d", partition)
}
offset, ok := times[time]
if !ok {
mor.t.Errorf("missing time: %d", time)
}
return offset
}
// MockFetchResponse is a `FetchResponse` builder.
type MockFetchResponse struct {
messages map[string]map[int32]map[int64]Encoder
highWaterMarks map[string]map[int32]int64
t TestReporter
batchSize int
}
func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
return &MockFetchResponse{
messages: make(map[string]map[int32]map[int64]Encoder),
highWaterMarks: make(map[string]map[int32]int64),
t: t,
batchSize: batchSize,
}
}
func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
partitions := mfr.messages[topic]
if partitions == nil {
partitions = make(map[int32]map[int64]Encoder)
mfr.messages[topic] = partitions
}
messages := partitions[partition]
if messages == nil {
messages = make(map[int64]Encoder)
partitions[partition] = messages
}
messages[offset] = msg
return mfr
}
func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
partitions := mfr.highWaterMarks[topic]
if partitions == nil {
partitions = make(map[int32]int64)
mfr.highWaterMarks[topic] = partitions
}
partitions[partition] = offset
return mfr
}
func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
fetchRequest := reqBody.(*FetchRequest)
res := &FetchResponse{}
for topic, partitions := range fetchRequest.blocks {
for partition, block := range partitions {
initialOffset := block.fetchOffset
offset := initialOffset
maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
for i := 0; i < mfr.batchSize && offset < maxOffset; {
msg := mfr.getMessage(topic, partition, offset)
if msg != nil {
res.AddMessage(topic, partition, nil, msg, offset)
i++
}
offset++
}
fb := res.GetBlock(topic, partition)
if fb == nil {
res.AddError(topic, partition, ErrNoError)
fb = res.GetBlock(topic, partition)
}
fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
}
}
return res
}
func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
partitions := mfr.messages[topic]
if partitions == nil {
return nil
}
messages := partitions[partition]
if messages == nil {
return nil
}
return messages[offset]
}
func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
partitions := mfr.messages[topic]
if partitions == nil {
return 0
}
messages := partitions[partition]
if messages == nil {
return 0
}
return len(messages)
}
func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
partitions := mfr.highWaterMarks[topic]
if partitions == nil {
return 0
}
return partitions[partition]
}
// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
type MockConsumerMetadataResponse struct {
coordinators map[string]interface{}
t TestReporter
}
func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
return &MockConsumerMetadataResponse{
coordinators: make(map[string]interface{}),
t: t,
}
}
func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
mr.coordinators[group] = broker
return mr
}
func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
mr.coordinators[group] = kerror
return mr
}
func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*ConsumerMetadataRequest)
group := req.ConsumerGroup
res := &ConsumerMetadataResponse{}
v := mr.coordinators[group]
switch v := v.(type) {
case *MockBroker:
res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
case KError:
res.Err = v
}
return res
}
// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
type MockOffsetCommitResponse struct {
errors map[string]map[string]map[int32]KError
t TestReporter
}
func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
return &MockOffsetCommitResponse{t: t}
}
func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[string]map[int32]KError)
}
topics := mr.errors[group]
if topics == nil {
topics = make(map[string]map[int32]KError)
mr.errors[group] = topics
}
partitions := topics[topic]
if partitions == nil {
partitions = make(map[int32]KError)
topics[topic] = partitions
}
partitions[partition] = kerror
return mr
}
func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*OffsetCommitRequest)
group := req.ConsumerGroup
res := &OffsetCommitResponse{}
for topic, partitions := range req.blocks {
for partition := range partitions {
res.AddError(topic, partition, mr.getError(group, topic, partition))
}
}
return res
}
func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
topics := mr.errors[group]
if topics == nil {
return ErrNoError
}
partitions := topics[topic]
if partitions == nil {
return ErrNoError
}
kerror, ok := partitions[partition]
if !ok {
return ErrNoError
}
return kerror
}
// MockProduceResponse is a `ProduceResponse` builder.
type MockProduceResponse struct {
errors map[string]map[int32]KError
t TestReporter
}
func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
return &MockProduceResponse{t: t}
}
func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
if mr.errors == nil {
mr.errors = make(map[string]map[int32]KError)
}
partitions := mr.errors[topic]
if partitions == nil {
partitions = make(map[int32]KError)
mr.errors[topic] = partitions
}
partitions[partition] = kerror
return mr
}
func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*ProduceRequest)
res := &ProduceResponse{}
for topic, partitions := range req.msgSets {
for partition := range partitions {
res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
}
}
return res
}
func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
partitions := mr.errors[topic]
if partitions == nil {
return ErrNoError
}
kerror, ok := partitions[partition]
if !ok {
return ErrNoError
}
return kerror
}
// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
type MockOffsetFetchResponse struct {
offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
t TestReporter
}
func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
return &MockOffsetFetchResponse{t: t}
}
func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
if mr.offsets == nil {
mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
}
topics := mr.offsets[group]
if topics == nil {
topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
mr.offsets[group] = topics
}
partitions := topics[topic]
if partitions == nil {
partitions = make(map[int32]*OffsetFetchResponseBlock)
topics[topic] = partitions
}
partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
return mr
}
func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
req := reqBody.(*OffsetFetchRequest)
group := req.ConsumerGroup
res := &OffsetFetchResponse{}
for topic, partitions := range mr.offsets[group] {
for partition, block := range partitions {
res.AddBlock(topic, partition, block)
}
}
return res
}
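The keys passed to SetHandlerByMap are the reflect type names of the request bodies, so wiring the builders above into a MockBroker looks roughly like this sketch (OffsetNewest and OffsetOldest are the package's sentinel offsets, -1 and -2):

	broker := NewMockBroker(t, 1)
	broker.SetHandlerByMap(map[string]MockResponse{
		"MetadataRequest": NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("my_topic", 0, broker.BrokerID()),
		"OffsetRequest": NewMockOffsetResponse(t).
			SetOffset("my_topic", 0, OffsetOldest, 0).
			SetOffset("my_topic", 0, OffsetNewest, 10),
	})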

190
vendor/github.com/Shopify/sarama/offset_commit_request.go generated vendored Normal file
View File

@ -0,0 +1,190 @@
package sarama
// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
// tells the broker to set the timestamp to the time at which the request was received.
// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
const ReceiveTime int64 = -1
// GroupGenerationUndefined is a special value for the group generation field of
// Offset Commit Requests that should be used when a consumer group does not rely
// on Kafka for partition management.
const GroupGenerationUndefined = -1
type offsetCommitRequestBlock struct {
offset int64
timestamp int64
metadata string
}
func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
pe.putInt64(b.offset)
if version == 1 {
pe.putInt64(b.timestamp)
} else if b.timestamp != 0 {
Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
}
return pe.putString(b.metadata)
}
func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
if b.offset, err = pd.getInt64(); err != nil {
return err
}
if version == 1 {
if b.timestamp, err = pd.getInt64(); err != nil {
return err
}
}
b.metadata, err = pd.getString()
return err
}
type OffsetCommitRequest struct {
ConsumerGroup string
ConsumerGroupGeneration int32 // v1 or later
ConsumerID string // v1 or later
RetentionTime int64 // v2 or later
// Version can be:
// - 0 (kafka 0.8.1 and later)
// - 1 (kafka 0.8.2 and later)
// - 2 (kafka 0.9.0 and later)
Version int16
blocks map[string]map[int32]*offsetCommitRequestBlock
}
func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
if r.Version < 0 || r.Version > 2 {
return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
}
if err := pe.putString(r.ConsumerGroup); err != nil {
return err
}
if r.Version >= 1 {
pe.putInt32(r.ConsumerGroupGeneration)
if err := pe.putString(r.ConsumerID); err != nil {
return err
}
} else {
if r.ConsumerGroupGeneration != 0 {
Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
}
if r.ConsumerID != "" {
Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
}
}
if r.Version >= 2 {
pe.putInt64(r.RetentionTime)
} else if r.RetentionTime != 0 {
Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
}
if err := pe.putArrayLength(len(r.blocks)); err != nil {
return err
}
for topic, partitions := range r.blocks {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe, r.Version); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ConsumerGroup, err = pd.getString(); err != nil {
return err
}
if r.Version >= 1 {
if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
return err
}
if r.ConsumerID, err = pd.getString(); err != nil {
return err
}
}
if r.Version >= 2 {
if r.RetentionTime, err = pd.getInt64(); err != nil {
return err
}
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &offsetCommitRequestBlock{}
if err := block.decode(pd, r.Version); err != nil {
return err
}
r.blocks[topic][partition] = block
}
}
return nil
}
func (r *OffsetCommitRequest) key() int16 {
return 8
}
func (r *OffsetCommitRequest) version() int16 {
return r.Version
}
func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_8_2_0
case 2:
return V0_9_0_0
default:
return minVersion
}
}
func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
}
r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
}
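A sketch of building a v1 commit request by hand, mirroring what the offset manager later in this commit does; ReceiveTime asks the broker to stamp the commit with its own clock:

	r := &OffsetCommitRequest{
		Version:                 1, // v1 requires Kafka 0.8.2 or later
		ConsumerGroup:           "my-group",
		ConsumerGroupGeneration: GroupGenerationUndefined,
	}
	r.AddBlock("my_topic", 0, 42, ReceiveTime, "")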

85
vendor/github.com/Shopify/sarama/offset_commit_response.go generated vendored Normal file
View File

@ -0,0 +1,85 @@
package sarama
type OffsetCommitResponse struct {
Errors map[string]map[int32]KError
}
func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
if r.Errors == nil {
r.Errors = make(map[string]map[int32]KError)
}
partitions := r.Errors[topic]
if partitions == nil {
partitions = make(map[int32]KError)
r.Errors[topic] = partitions
}
partitions[partition] = kerror
}
func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Errors)); err != nil {
return err
}
for topic, partitions := range r.Errors {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, kerror := range partitions {
pe.putInt32(partition)
pe.putInt16(int16(kerror))
}
}
return nil
}
func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
return err
}
r.Errors = make(map[string]map[int32]KError, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numErrors, err := pd.getArrayLength()
if err != nil {
return err
}
r.Errors[name] = make(map[int32]KError, numErrors)
for j := 0; j < numErrors; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
r.Errors[name][id] = KError(tmp)
}
}
return nil
}
func (r *OffsetCommitResponse) key() int16 {
return 8
}
func (r *OffsetCommitResponse) version() int16 {
return 0
}
func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
return minVersion
}

81
vendor/github.com/Shopify/sarama/offset_fetch_request.go generated vendored Normal file
View File

@ -0,0 +1,81 @@
package sarama
type OffsetFetchRequest struct {
ConsumerGroup string
Version int16
partitions map[string][]int32
}
func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
if r.Version < 0 || r.Version > 1 {
return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
}
if err = pe.putString(r.ConsumerGroup); err != nil {
return err
}
if err = pe.putArrayLength(len(r.partitions)); err != nil {
return err
}
for topic, partitions := range r.partitions {
if err = pe.putString(topic); err != nil {
return err
}
if err = pe.putInt32Array(partitions); err != nil {
return err
}
}
return nil
}
func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
if r.ConsumerGroup, err = pd.getString(); err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
if partitionCount == 0 {
return nil
}
r.partitions = make(map[string][]int32)
for i := 0; i < partitionCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitions, err := pd.getInt32Array()
if err != nil {
return err
}
r.partitions[topic] = partitions
}
return nil
}
func (r *OffsetFetchRequest) key() int16 {
return 9
}
func (r *OffsetFetchRequest) version() int16 {
return r.Version
}
func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_8_2_0
default:
return minVersion
}
}
func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
if r.partitions == nil {
r.partitions = make(map[string][]int32)
}
r.partitions[topic] = append(r.partitions[topic], partitionID)
}
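The fetch side is symmetric; a sketch of asking the coordinator for a committed offset back (version 1 reads from Kafka-backed offset storage, as fetchInitialOffset in offset_manager.go does later in this commit):

	req := new(OffsetFetchRequest)
	req.Version = 1
	req.ConsumerGroup = "my-group"
	req.AddPartition("my_topic", 0)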

143
vendor/github.com/Shopify/sarama/offset_fetch_response.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
package sarama
type OffsetFetchResponseBlock struct {
Offset int64
Metadata string
Err KError
}
func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
b.Metadata, err = pd.getString()
if err != nil {
return err
}
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
return nil
}
func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
pe.putInt64(b.Offset)
err = pe.putString(b.Metadata)
if err != nil {
return err
}
pe.putInt16(int16(b.Err))
return nil
}
type OffsetFetchResponse struct {
Blocks map[string]map[int32]*OffsetFetchResponseBlock
}
func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
if err := pe.putArrayLength(len(r.Blocks)); err != nil {
return err
}
for topic, partitions := range r.Blocks {
if err := pe.putString(topic); err != nil {
return err
}
if err := pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err := block.encode(pe); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil || numTopics == 0 {
return err
}
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
if numBlocks == 0 {
r.Blocks[name] = nil
continue
}
r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(OffsetFetchResponseBlock)
err = block.decode(pd)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *OffsetFetchResponse) key() int16 {
return 9
}
func (r *OffsetFetchResponse) version() int16 {
return 0
}
func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
return minVersion
}
func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
}
partitions := r.Blocks[topic]
if partitions == nil {
partitions = make(map[int32]*OffsetFetchResponseBlock)
r.Blocks[topic] = partitions
}
partitions[partition] = block
}

542
vendor/github.com/Shopify/sarama/offset_manager.go generated vendored Normal file
View File

@ -0,0 +1,542 @@
package sarama
import (
"sync"
"time"
)
// Offset Manager
// OffsetManager uses Kafka to store and fetch consumed partition offsets.
type OffsetManager interface {
// ManagePartition creates a PartitionOffsetManager on the given topic/partition.
// It will return an error if this OffsetManager is already managing the given
// topic/partition.
ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
// Close stops the OffsetManager from managing offsets. It is required to call
// this function before an OffsetManager object passes out of scope, as it
// will otherwise leak memory. You must call this after all the
// PartitionOffsetManagers are closed.
Close() error
}
type offsetManager struct {
client Client
conf *Config
group string
lock sync.Mutex
poms map[string]map[int32]*partitionOffsetManager
boms map[*Broker]*brokerOffsetManager
}
// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
// It is still necessary to call Close() on the underlying client when finished with the partition manager.
func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
// Check that we are not dealing with a closed Client before processing any other arguments
if client.Closed() {
return nil, ErrClosedClient
}
om := &offsetManager{
client: client,
conf: client.Config(),
group: group,
poms: make(map[string]map[int32]*partitionOffsetManager),
boms: make(map[*Broker]*brokerOffsetManager),
}
return om, nil
}
func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
pom, err := om.newPartitionOffsetManager(topic, partition)
if err != nil {
return nil, err
}
om.lock.Lock()
defer om.lock.Unlock()
topicManagers := om.poms[topic]
if topicManagers == nil {
topicManagers = make(map[int32]*partitionOffsetManager)
om.poms[topic] = topicManagers
}
if topicManagers[partition] != nil {
return nil, ConfigurationError("That topic/partition is already being managed")
}
topicManagers[partition] = pom
return pom, nil
}
func (om *offsetManager) Close() error {
return nil
}
func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
om.lock.Lock()
defer om.lock.Unlock()
bom := om.boms[broker]
if bom == nil {
bom = om.newBrokerOffsetManager(broker)
om.boms[broker] = bom
}
bom.refs++
return bom
}
func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
om.lock.Lock()
defer om.lock.Unlock()
bom.refs--
if bom.refs == 0 {
close(bom.updateSubscriptions)
if om.boms[bom.broker] == bom {
delete(om.boms, bom.broker)
}
}
}
func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
om.lock.Lock()
defer om.lock.Unlock()
delete(om.boms, bom.broker)
}
func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
om.lock.Lock()
defer om.lock.Unlock()
delete(om.poms[pom.topic], pom.partition)
if len(om.poms[pom.topic]) == 0 {
delete(om.poms, pom.topic)
}
}
// Partition Offset Manager
// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
// out of scope.
type PartitionOffsetManager interface {
// NextOffset returns the next offset that should be consumed for the managed
// partition, accompanied by metadata which can be used to reconstruct the state
// of the partition consumer when it resumes. NextOffset() will return
// `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
// was committed for this partition yet.
NextOffset() (int64, string)
// MarkOffset marks the provided offset, alongside a metadata string
// that represents the state of the partition consumer at that point in time. The
// metadata string can be used by another consumer to restore that state, so it
// can resume consumption.
//
// To follow upstream conventions, you are expected to mark the offset of the
// next message to read, not the last message read. Thus, when calling `MarkOffset`
// you should typically add one to the offset of the last consumed message.
//
// Note: calling MarkOffset does not necessarily commit the offset to the backend
// store immediately for efficiency reasons, and it may never be committed if
// your application crashes. This means that you may end up processing the same
// message twice, and your processing should ideally be idempotent.
MarkOffset(offset int64, metadata string)
// Errors returns a read channel of errors that occur during offset management, if
// enabled. By default, errors are logged and not returned over this channel. If
// you want to implement any custom error handling, set your config's
// Consumer.Return.Errors setting to true, and read from this channel.
Errors() <-chan *ConsumerError
// AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
// return immediately, after which you should wait until the 'errors' channel has
// been drained and closed. It is required to call this function (or Close) before
// a consumer object passes out of scope, as it will otherwise leak memory. You
// must call this before calling Close on the underlying client.
AsyncClose()
// Close stops the PartitionOffsetManager from managing offsets. It is required to
// call this function (or AsyncClose) before a PartitionOffsetManager object
// passes out of scope, as it will otherwise leak memory. You must call this
// before calling Close on the underlying client.
Close() error
}
type partitionOffsetManager struct {
parent *offsetManager
topic string
partition int32
lock sync.Mutex
offset int64
metadata string
dirty bool
clean sync.Cond
broker *brokerOffsetManager
errors chan *ConsumerError
rebalance chan none
dying chan none
}
func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
pom := &partitionOffsetManager{
parent: om,
topic: topic,
partition: partition,
errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
rebalance: make(chan none, 1),
dying: make(chan none),
}
pom.clean.L = &pom.lock
if err := pom.selectBroker(); err != nil {
return nil, err
}
if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
return nil, err
}
pom.broker.updateSubscriptions <- pom
go withRecover(pom.mainLoop)
return pom, nil
}
func (pom *partitionOffsetManager) mainLoop() {
for {
select {
case <-pom.rebalance:
if err := pom.selectBroker(); err != nil {
pom.handleError(err)
pom.rebalance <- none{}
} else {
pom.broker.updateSubscriptions <- pom
}
case <-pom.dying:
if pom.broker != nil {
select {
case <-pom.rebalance:
case pom.broker.updateSubscriptions <- pom:
}
pom.parent.unrefBrokerOffsetManager(pom.broker)
}
pom.parent.abandonPartitionOffsetManager(pom)
close(pom.errors)
return
}
}
}
func (pom *partitionOffsetManager) selectBroker() error {
if pom.broker != nil {
pom.parent.unrefBrokerOffsetManager(pom.broker)
pom.broker = nil
}
var broker *Broker
var err error
if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
return err
}
if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
return err
}
pom.broker = pom.parent.refBrokerOffsetManager(broker)
return nil
}
func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
request := new(OffsetFetchRequest)
request.Version = 1
request.ConsumerGroup = pom.parent.group
request.AddPartition(pom.topic, pom.partition)
response, err := pom.broker.broker.FetchOffset(request)
if err != nil {
return err
}
block := response.GetBlock(pom.topic, pom.partition)
if block == nil {
return ErrIncompleteResponse
}
switch block.Err {
case ErrNoError:
pom.offset = block.Offset
pom.metadata = block.Metadata
return nil
case ErrNotCoordinatorForConsumer:
if retries <= 0 {
return block.Err
}
if err := pom.selectBroker(); err != nil {
return err
}
return pom.fetchInitialOffset(retries - 1)
case ErrOffsetsLoadInProgress:
if retries <= 0 {
return block.Err
}
time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
return pom.fetchInitialOffset(retries - 1)
default:
return block.Err
}
}
func (pom *partitionOffsetManager) handleError(err error) {
cErr := &ConsumerError{
Topic: pom.topic,
Partition: pom.partition,
Err: err,
}
if pom.parent.conf.Consumer.Return.Errors {
pom.errors <- cErr
} else {
Logger.Println(cErr)
}
}
func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
return pom.errors
}
func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if offset > pom.offset {
pom.offset = offset
pom.metadata = metadata
pom.dirty = true
}
}
func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if pom.offset == offset && pom.metadata == metadata {
pom.dirty = false
pom.clean.Signal()
}
}
func (pom *partitionOffsetManager) NextOffset() (int64, string) {
pom.lock.Lock()
defer pom.lock.Unlock()
if pom.offset >= 0 {
return pom.offset, pom.metadata
}
return pom.parent.conf.Consumer.Offsets.Initial, ""
}
func (pom *partitionOffsetManager) AsyncClose() {
go func() {
pom.lock.Lock()
defer pom.lock.Unlock()
for pom.dirty {
pom.clean.Wait()
}
close(pom.dying)
}()
}
func (pom *partitionOffsetManager) Close() error {
pom.AsyncClose()
var errors ConsumerErrors
for err := range pom.errors {
errors = append(errors, err)
}
if len(errors) > 0 {
return errors
}
return nil
}
// Broker Offset Manager
type brokerOffsetManager struct {
parent *offsetManager
broker *Broker
timer *time.Ticker
updateSubscriptions chan *partitionOffsetManager
subscriptions map[*partitionOffsetManager]none
refs int
}
func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
bom := &brokerOffsetManager{
parent: om,
broker: broker,
timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
updateSubscriptions: make(chan *partitionOffsetManager),
subscriptions: make(map[*partitionOffsetManager]none),
}
go withRecover(bom.mainLoop)
return bom
}
func (bom *brokerOffsetManager) mainLoop() {
for {
select {
case <-bom.timer.C:
if len(bom.subscriptions) > 0 {
bom.flushToBroker()
}
case s, ok := <-bom.updateSubscriptions:
if !ok {
bom.timer.Stop()
return
}
if _, ok := bom.subscriptions[s]; ok {
delete(bom.subscriptions, s)
} else {
bom.subscriptions[s] = none{}
}
}
}
}
func (bom *brokerOffsetManager) flushToBroker() {
request := bom.constructRequest()
if request == nil {
return
}
response, err := bom.broker.CommitOffset(request)
if err != nil {
bom.abort(err)
return
}
for s := range bom.subscriptions {
if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
continue
}
var err KError
var ok bool
if response.Errors[s.topic] == nil {
s.handleError(ErrIncompleteResponse)
delete(bom.subscriptions, s)
s.rebalance <- none{}
continue
}
if err, ok = response.Errors[s.topic][s.partition]; !ok {
s.handleError(ErrIncompleteResponse)
delete(bom.subscriptions, s)
s.rebalance <- none{}
continue
}
switch err {
case ErrNoError:
block := request.blocks[s.topic][s.partition]
s.updateCommitted(block.offset, block.metadata)
case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
// not a critical error, we just need to redispatch
delete(bom.subscriptions, s)
s.rebalance <- none{}
case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
// nothing we can do about this, just tell the user and carry on
s.handleError(err)
case ErrOffsetsLoadInProgress:
// nothing wrong but we didn't commit, we'll get it next time round
break
case ErrUnknownTopicOrPartition:
// let the user know *and* try redispatching - if topic-auto-create is
// enabled, redispatching should trigger a metadata request and create the
// topic; if not then re-dispatching won't help, but we've let the user
// know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
fallthrough
default:
// dunno, tell the user and try redispatching
s.handleError(err)
delete(bom.subscriptions, s)
s.rebalance <- none{}
}
}
}
func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
var r *OffsetCommitRequest
var perPartitionTimestamp int64
if bom.parent.conf.Consumer.Offsets.Retention == 0 {
perPartitionTimestamp = ReceiveTime
r = &OffsetCommitRequest{
Version: 1,
ConsumerGroup: bom.parent.group,
ConsumerGroupGeneration: GroupGenerationUndefined,
}
} else {
r = &OffsetCommitRequest{
Version: 2,
RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
ConsumerGroup: bom.parent.group,
ConsumerGroupGeneration: GroupGenerationUndefined,
}
}
for s := range bom.subscriptions {
s.lock.Lock()
if s.dirty {
r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
}
s.lock.Unlock()
}
if len(r.blocks) > 0 {
return r
}
return nil
}
func (bom *brokerOffsetManager) abort(err error) {
_ = bom.broker.Close() // we don't care about the error this might return, we already have one
bom.parent.abandonBroker(bom)
for pom := range bom.subscriptions {
pom.handleError(err)
pom.rebalance <- none{}
}
for s := range bom.updateSubscriptions {
if _, ok := bom.subscriptions[s]; !ok {
s.handleError(err)
s.rebalance <- none{}
}
}
bom.subscriptions = make(map[*partitionOffsetManager]none)
}
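Putting the interfaces above together, a sketch of the intended call sequence, assuming an already-connected Client named client:

	om, err := NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		return err
	}
	pom, err := om.ManagePartition("my_topic", 0)
	if err != nil {
		return err
	}
	offset, _ := pom.NextOffset()
	// ... consume the message at offset ...
	pom.MarkOffset(offset+1, "") // mark the next offset to read, per the docs above
	if err := pom.Close(); err != nil { // close all POMs before the OffsetManager
		return err
	}
	return om.Close()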

132
vendor/github.com/Shopify/sarama/offset_request.go generated vendored Normal file
View File

@ -0,0 +1,132 @@
package sarama
type offsetRequestBlock struct {
time int64
maxOffsets int32 // Only used in version 0
}
func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
pe.putInt64(b.time)
if version == 0 {
pe.putInt32(b.maxOffsets)
}
return nil
}
func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
if b.time, err = pd.getInt64(); err != nil {
return err
}
if version == 0 {
if b.maxOffsets, err = pd.getInt32(); err != nil {
return err
}
}
return nil
}
type OffsetRequest struct {
Version int16
blocks map[string]map[int32]*offsetRequestBlock
}
func (r *OffsetRequest) encode(pe packetEncoder) error {
pe.putInt32(-1) // replica ID is always -1 for clients
err := pe.putArrayLength(len(r.blocks))
if err != nil {
return err
}
for topic, partitions := range r.blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err = block.encode(pe, r.Version); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
r.Version = version
// Ignore replica ID
if _, err := pd.getInt32(); err != nil {
return err
}
blockCount, err := pd.getArrayLength()
if err != nil {
return err
}
if blockCount == 0 {
return nil
}
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
for i := 0; i < blockCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
block := &offsetRequestBlock{}
if err := block.decode(pd, version); err != nil {
return err
}
r.blocks[topic][partition] = block
}
}
return nil
}
func (r *OffsetRequest) key() int16 {
return 2
}
func (r *OffsetRequest) version() int16 {
return r.Version
}
func (r *OffsetRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_1_0
default:
return minVersion
}
}
func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
if r.blocks == nil {
r.blocks = make(map[string]map[int32]*offsetRequestBlock)
}
if r.blocks[topic] == nil {
r.blocks[topic] = make(map[int32]*offsetRequestBlock)
}
tmp := new(offsetRequestBlock)
tmp.time = time
if r.Version == 0 {
tmp.maxOffsets = maxOffsets
}
r.blocks[topic][partitionID] = tmp
}
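A sketch of a v0 list-offsets request: the sentinel time -1 asks for the latest offset and -2 for the earliest (matching the package's OffsetNewest and OffsetOldest constants), and maxOffsets is honoured only by version 0:

	req := &OffsetRequest{Version: 0}
	req.AddBlock("my_topic", 0, -1, 1) // time -1 = newest offset, return at most 1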

174
vendor/github.com/Shopify/sarama/offset_response.go generated vendored Normal file
View File

@ -0,0 +1,174 @@
package sarama
type OffsetResponseBlock struct {
Err KError
Offsets []int64 // Version 0
Offset int64 // Version 1
Timestamp int64 // Version 1
}
func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
if version == 0 {
b.Offsets, err = pd.getInt64Array()
return err
}
b.Timestamp, err = pd.getInt64()
if err != nil {
return err
}
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
// For backwards compatibility put the offset in the offsets array too
b.Offsets = []int64{b.Offset}
return nil
}
func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
pe.putInt16(int16(b.Err))
if version == 0 {
return pe.putInt64Array(b.Offsets)
}
pe.putInt64(b.Timestamp)
pe.putInt64(b.Offset)
return nil
}
type OffsetResponse struct {
Version int16
Blocks map[string]map[int32]*OffsetResponseBlock
}
func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(OffsetResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
return nil
}
func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
/*
// [0 0 0 1 ntopics
0 8 109 121 95 116 111 112 105 99 topic
0 0 0 1 npartitions
0 0 0 0 id
0 0
0 0 0 1 0 0 0 0
0 1 1 1 0 0 0 1
0 8 109 121 95 116 111 112
105 99 0 0 0 1 0 0
0 0 0 0 0 0 0 1
0 0 0 0 0 1 1 1] <nil>
*/
func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
if err = pe.putArrayLength(len(r.Blocks)); err != nil {
return err
}
for topic, partitions := range r.Blocks {
if err = pe.putString(topic); err != nil {
return err
}
if err = pe.putArrayLength(len(partitions)); err != nil {
return err
}
for partition, block := range partitions {
pe.putInt32(partition)
if err = block.encode(pe, r.version()); err != nil {
return err
}
}
}
return nil
}
func (r *OffsetResponse) key() int16 {
return 2
}
func (r *OffsetResponse) version() int16 {
return r.Version
}
func (r *OffsetResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_10_1_0
default:
return minVersion
}
}
// testing API
func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
}
byTopic, ok := r.Blocks[topic]
if !ok {
byTopic = make(map[int32]*OffsetResponseBlock)
r.Blocks[topic] = byTopic
}
byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
}

45
vendor/github.com/Shopify/sarama/packet_decoder.go generated vendored Normal file
View File

@ -0,0 +1,45 @@
package sarama
// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
// Types implementing Decoder only need to worry about calling methods like GetString,
// not about how a string is represented in Kafka.
type packetDecoder interface {
// Primitives
getInt8() (int8, error)
getInt16() (int16, error)
getInt32() (int32, error)
getInt64() (int64, error)
getArrayLength() (int, error)
// Collections
getBytes() ([]byte, error)
getString() (string, error)
getInt32Array() ([]int32, error)
getInt64Array() ([]int64, error)
getStringArray() ([]string, error)
// Subsets
remaining() int
getSubset(length int) (packetDecoder, error)
// Stacks, see PushDecoder
push(in pushDecoder) error
pop() error
}
// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
// depend upon have been decoded.
type pushDecoder interface {
// Saves the offset into the input buffer as the location to actually read the calculated value when able.
saveOffset(in int)
// Returns the length of data to reserve for the input of this decoder (eg 4 bytes for a CRC32).
reserveLength() int
// Indicates that all required data is now available to calculate and check the field.
// SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
// of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
check(curOffset int, buf []byte) error
}

50
vendor/github.com/Shopify/sarama/packet_encoder.go generated vendored Normal file
View File

@ -0,0 +1,50 @@
package sarama
import "github.com/rcrowley/go-metrics"
// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
// Types implementing Encoder only need to worry about calling methods like PutString,
// not about how a string is represented in Kafka.
type packetEncoder interface {
// Primitives
putInt8(in int8)
putInt16(in int16)
putInt32(in int32)
putInt64(in int64)
putArrayLength(in int) error
// Collections
putBytes(in []byte) error
putRawBytes(in []byte) error
putString(in string) error
putStringArray(in []string) error
putInt32Array(in []int32) error
putInt64Array(in []int64) error
// Provide the current offset to record the batch size metric
offset() int
// Stacks, see PushEncoder
push(in pushEncoder)
pop() error
// To record metrics when provided
metricRegistry() metrics.Registry
}
// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
// depend upon have been written.
type pushEncoder interface {
// Saves the offset into the input buffer as the location to actually write the calculated value when able.
saveOffset(in int)
// Returns the length of data to reserve for the output of this encoder (eg 4 bytes for a CRC32).
reserveLength() int
// Indicates that all required data is now available to calculate and write the field.
// SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
// of data to the saved offset, based on the data between the saved offset and curOffset.
run(curOffset int, buf []byte) error
}

135
vendor/github.com/Shopify/sarama/partitioner.go generated vendored Normal file
View File

@ -0,0 +1,135 @@
package sarama
import (
"hash"
"hash/fnv"
"math/rand"
"time"
)
// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
// as simple default implementations.
type Partitioner interface {
// Partition takes a message and partition count and chooses a partition
Partition(message *ProducerMessage, numPartitions int32) (int32, error)
// RequiresConsistency indicates to the user of the partitioner whether the
// mapping of key->partition is consistent or not. Specifically, if a
// partitioner requires consistency then it must be allowed to choose from all
// partitions (even ones known to be unavailable), and its choice must be
// respected by the caller. The obvious example is the HashPartitioner.
RequiresConsistency() bool
}
// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
type PartitionerConstructor func(topic string) Partitioner
type manualPartitioner struct{}
// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
// ProducerMessage's Partition field as the partition to produce to.
func NewManualPartitioner(topic string) Partitioner {
return new(manualPartitioner)
}
func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
return message.Partition, nil
}
func (p *manualPartitioner) RequiresConsistency() bool {
return true
}
type randomPartitioner struct {
generator *rand.Rand
}
// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
func NewRandomPartitioner(topic string) Partitioner {
p := new(randomPartitioner)
p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
return p
}
func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
return int32(p.generator.Intn(int(numPartitions))), nil
}
func (p *randomPartitioner) RequiresConsistency() bool {
return false
}
type roundRobinPartitioner struct {
partition int32
}
// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
func NewRoundRobinPartitioner(topic string) Partitioner {
return &roundRobinPartitioner{}
}
func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
if p.partition >= numPartitions {
p.partition = 0
}
ret := p.partition
p.partition++
return ret, nil
}
func (p *roundRobinPartitioner) RequiresConsistency() bool {
return false
}
type hashPartitioner struct {
random Partitioner
hasher hash.Hash32
}
// NewCustomHashPartitioner is a wrapper around NewHashPartitioner, allowing the use of a custom hasher.
// The argument is a function providing the instance, implementing the hash.Hash32 interface. This is to ensure that
// each partition dispatcher gets its own hasher, to avoid concurrency issues by sharing an instance.
func NewCustomHashPartitioner(hasher func() hash.Hash32) PartitionerConstructor {
return func(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = hasher()
return p
}
}
// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
// modulus the number of partitions. This ensures that messages with the same key always end up on the
// same partition.
func NewHashPartitioner(topic string) Partitioner {
p := new(hashPartitioner)
p.random = NewRandomPartitioner(topic)
p.hasher = fnv.New32a()
return p
}
func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
if message.Key == nil {
return p.random.Partition(message, numPartitions)
}
bytes, err := message.Key.Encode()
if err != nil {
return -1, err
}
p.hasher.Reset()
_, err = p.hasher.Write(bytes)
if err != nil {
return -1, err
}
partition := int32(p.hasher.Sum32()) % numPartitions
if partition < 0 {
partition = -partition
}
return partition, nil
}
func (p *hashPartitioner) RequiresConsistency() bool {
return true
}
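These constructors plug straight into sarama's producer configuration. A minimal sketch of selecting a partitioner (this assumes sarama's standard `Config.Producer.Partitioner` field, which accepts any PartitionerConstructor; `fnv.New32a` satisfies the `func() hash.Hash32` argument):

// Sketch: wiring a partitioner into a producer config.
cfg := sarama.NewConfig()
cfg.Producer.Partitioner = sarama.NewHashPartitioner
// or, with a caller-supplied hash constructor:
cfg.Producer.Partitioner = sarama.NewCustomHashPartitioner(fnv.New32a)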

121
vendor/github.com/Shopify/sarama/prep_encoder.go generated vendored Normal file

@@ -0,0 +1,121 @@
package sarama
import (
"fmt"
"math"
"github.com/rcrowley/go-metrics"
)
type prepEncoder struct {
length int
}
// primitives
func (pe *prepEncoder) putInt8(in int8) {
pe.length++
}
func (pe *prepEncoder) putInt16(in int16) {
pe.length += 2
}
func (pe *prepEncoder) putInt32(in int32) {
pe.length += 4
}
func (pe *prepEncoder) putInt64(in int64) {
pe.length += 8
}
func (pe *prepEncoder) putArrayLength(in int) error {
if in > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
}
pe.length += 4
return nil
}
// arrays
func (pe *prepEncoder) putBytes(in []byte) error {
pe.length += 4
if in == nil {
return nil
}
if len(in) > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putRawBytes(in []byte) error {
if len(in) > math.MaxInt32 {
return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putString(in string) error {
pe.length += 2
if len(in) > math.MaxInt16 {
return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
}
pe.length += len(in)
return nil
}
func (pe *prepEncoder) putStringArray(in []string) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
for _, str := range in {
if err := pe.putString(str); err != nil {
return err
}
}
return nil
}
func (pe *prepEncoder) putInt32Array(in []int32) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
pe.length += 4 * len(in)
return nil
}
func (pe *prepEncoder) putInt64Array(in []int64) error {
err := pe.putArrayLength(len(in))
if err != nil {
return err
}
pe.length += 8 * len(in)
return nil
}
func (pe *prepEncoder) offset() int {
return pe.length
}
// stackable
func (pe *prepEncoder) push(in pushEncoder) {
pe.length += in.reserveLength()
}
func (pe *prepEncoder) pop() error {
return nil
}
// we do not record metrics during the prep encoder pass
func (pe *prepEncoder) metricRegistry() metrics.Registry {
return nil
}
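The prep encoder exists so sarama can compute an exact buffer size before writing a single byte. A standalone sketch of the same two-pass idea (not sarama code), mirroring putString's int16-length-prefix layout:

package main

import (
	"encoding/binary"
	"fmt"
)

// pass 1: compute the exact encoded size (2-byte length prefix + payload)
func prepLen(s string) int { return 2 + len(s) }

// pass 2: write into a buffer of exactly that size
func encodeString(s string) []byte {
	buf := make([]byte, prepLen(s))
	binary.BigEndian.PutUint16(buf, uint16(len(s)))
	copy(buf[2:], s)
	return buf
}

func main() {
	fmt.Printf("% x\n", encodeString("hi")) // 00 02 68 69
}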

209
vendor/github.com/Shopify/sarama/produce_request.go generated vendored Normal file

@@ -0,0 +1,209 @@
package sarama
import "github.com/rcrowley/go-metrics"
// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
// it must see before responding. Any of the constants defined here are valid. On broker versions
// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
// by setting the `min.insync.replicas` value in the broker configuration).
type RequiredAcks int16
const (
// NoResponse doesn't send any response; the TCP ACK is all you get.
NoResponse RequiredAcks = 0
// WaitForLocal waits for only the local commit to succeed before responding.
WaitForLocal RequiredAcks = 1
// WaitForAll waits for all in-sync replicas to commit before responding.
// The minimum number of in-sync replicas is configured on the broker via
// the `min.insync.replicas` configuration key.
WaitForAll RequiredAcks = -1
)
type ProduceRequest struct {
RequiredAcks RequiredAcks
Timeout int32
Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10
msgSets map[string]map[int32]*MessageSet
}
func (r *ProduceRequest) encode(pe packetEncoder) error {
pe.putInt16(int16(r.RequiredAcks))
pe.putInt32(r.Timeout)
err := pe.putArrayLength(len(r.msgSets))
if err != nil {
return err
}
metricRegistry := pe.metricRegistry()
var batchSizeMetric metrics.Histogram
var compressionRatioMetric metrics.Histogram
if metricRegistry != nil {
batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
}
totalRecordCount := int64(0)
for topic, partitions := range r.msgSets {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
topicRecordCount := int64(0)
var topicCompressionRatioMetric metrics.Histogram
if metricRegistry != nil {
topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
}
for id, msgSet := range partitions {
startOffset := pe.offset()
pe.putInt32(id)
pe.push(&lengthField{})
err = msgSet.encode(pe)
if err != nil {
return err
}
err = pe.pop()
if err != nil {
return err
}
if metricRegistry != nil {
for _, messageBlock := range msgSet.Messages {
// Is this a fake "message" wrapping real messages?
if messageBlock.Msg.Set != nil {
topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
} else {
// A single uncompressed message
topicRecordCount++
}
// Better safe than sorry when computing the compression ratio
if messageBlock.Msg.compressedSize != 0 {
compressionRatio := float64(len(messageBlock.Msg.Value)) /
float64(messageBlock.Msg.compressedSize)
// Histograms do not support decimal values, so multiply the ratio by 100 for better precision
intCompressionRatio := int64(100 * compressionRatio)
compressionRatioMetric.Update(intCompressionRatio)
topicCompressionRatioMetric.Update(intCompressionRatio)
}
}
batchSize := int64(pe.offset() - startOffset)
batchSizeMetric.Update(batchSize)
getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
}
}
if topicRecordCount > 0 {
getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
totalRecordCount += topicRecordCount
}
}
if totalRecordCount > 0 {
metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
}
return nil
}
func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
requiredAcks, err := pd.getInt16()
if err != nil {
return err
}
r.RequiredAcks = RequiredAcks(requiredAcks)
if r.Timeout, err = pd.getInt32(); err != nil {
return err
}
topicCount, err := pd.getArrayLength()
if err != nil {
return err
}
if topicCount == 0 {
return nil
}
r.msgSets = make(map[string]map[int32]*MessageSet)
for i := 0; i < topicCount; i++ {
topic, err := pd.getString()
if err != nil {
return err
}
partitionCount, err := pd.getArrayLength()
if err != nil {
return err
}
r.msgSets[topic] = make(map[int32]*MessageSet)
for j := 0; j < partitionCount; j++ {
partition, err := pd.getInt32()
if err != nil {
return err
}
messageSetSize, err := pd.getInt32()
if err != nil {
return err
}
msgSetDecoder, err := pd.getSubset(int(messageSetSize))
if err != nil {
return err
}
msgSet := &MessageSet{}
err = msgSet.decode(msgSetDecoder)
if err != nil {
return err
}
r.msgSets[topic][partition] = msgSet
}
}
return nil
}
func (r *ProduceRequest) key() int16 {
return 0
}
func (r *ProduceRequest) version() int16 {
return r.Version
}
func (r *ProduceRequest) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
default:
return minVersion
}
}
func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
if r.msgSets == nil {
r.msgSets = make(map[string]map[int32]*MessageSet)
}
if r.msgSets[topic] == nil {
r.msgSets[topic] = make(map[int32]*MessageSet)
}
set := r.msgSets[topic][partition]
if set == nil {
set = new(MessageSet)
r.msgSets[topic][partition] = set
}
set.addMessage(msg)
}
func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
if r.msgSets == nil {
r.msgSets = make(map[string]map[int32]*MessageSet)
}
if r.msgSets[topic] == nil {
r.msgSets[topic] = make(map[int32]*MessageSet)
}
r.msgSets[topic][partition] = set
}
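ProduceRequest is normally assembled for you by the producers, but the exported surface above can be exercised directly. A hedged sketch (topic name and timeout are placeholder values; real applications should go through SyncProducer or AsyncProducer):

req := &sarama.ProduceRequest{
	RequiredAcks: sarama.WaitForAll, // block until all in-sync replicas ack
	Timeout:      10000,             // broker-side timeout, in milliseconds
}
req.AddMessage("my-topic", 0, &sarama.Message{Value: []byte("hello")})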

159
vendor/github.com/Shopify/sarama/produce_response.go generated vendored Normal file

@@ -0,0 +1,159 @@
package sarama
import "time"
type ProduceResponseBlock struct {
Err KError
Offset int64
// only provided if Version >= 2 and the broker is configured with `LogAppendTime`
Timestamp time.Time
}
func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
tmp, err := pd.getInt16()
if err != nil {
return err
}
b.Err = KError(tmp)
b.Offset, err = pd.getInt64()
if err != nil {
return err
}
if version >= 2 {
if millis, err := pd.getInt64(); err != nil {
return err
} else if millis != -1 {
b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
}
}
return nil
}
type ProduceResponse struct {
Blocks map[string]map[int32]*ProduceResponseBlock
Version int16
ThrottleTime time.Duration // only provided if Version >= 1
}
func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
r.Version = version
numTopics, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
for i := 0; i < numTopics; i++ {
name, err := pd.getString()
if err != nil {
return err
}
numBlocks, err := pd.getArrayLength()
if err != nil {
return err
}
r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
for j := 0; j < numBlocks; j++ {
id, err := pd.getInt32()
if err != nil {
return err
}
block := new(ProduceResponseBlock)
err = block.decode(pd, version)
if err != nil {
return err
}
r.Blocks[name][id] = block
}
}
if r.Version >= 1 {
millis, err := pd.getInt32()
if err != nil {
return err
}
r.ThrottleTime = time.Duration(millis) * time.Millisecond
}
return nil
}
func (r *ProduceResponse) encode(pe packetEncoder) error {
err := pe.putArrayLength(len(r.Blocks))
if err != nil {
return err
}
for topic, partitions := range r.Blocks {
err = pe.putString(topic)
if err != nil {
return err
}
err = pe.putArrayLength(len(partitions))
if err != nil {
return err
}
for id, prb := range partitions {
pe.putInt32(id)
pe.putInt16(int16(prb.Err))
pe.putInt64(prb.Offset)
}
}
if r.Version >= 1 {
pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
}
return nil
}
func (r *ProduceResponse) key() int16 {
return 0
}
func (r *ProduceResponse) version() int16 {
return r.Version
}
func (r *ProduceResponse) requiredVersion() KafkaVersion {
switch r.Version {
case 1:
return V0_9_0_0
case 2:
return V0_10_0_0
default:
return minVersion
}
}
func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
if r.Blocks == nil {
return nil
}
if r.Blocks[topic] == nil {
return nil
}
return r.Blocks[topic][partition]
}
// Testing API
func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
if r.Blocks == nil {
r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
}
byTopic, ok := r.Blocks[topic]
if !ok {
byTopic = make(map[int32]*ProduceResponseBlock)
r.Blocks[topic] = byTopic
}
byTopic[partition] = &ProduceResponseBlock{Err: err}
}

176
vendor/github.com/Shopify/sarama/produce_set.go generated vendored Normal file

@@ -0,0 +1,176 @@
package sarama
import "time"
type partitionSet struct {
msgs []*ProducerMessage
setToSend *MessageSet
bufferBytes int
}
type produceSet struct {
parent *asyncProducer
msgs map[string]map[int32]*partitionSet
bufferBytes int
bufferCount int
}
func newProduceSet(parent *asyncProducer) *produceSet {
return &produceSet{
msgs: make(map[string]map[int32]*partitionSet),
parent: parent,
}
}
func (ps *produceSet) add(msg *ProducerMessage) error {
var err error
var key, val []byte
if msg.Key != nil {
if key, err = msg.Key.Encode(); err != nil {
return err
}
}
if msg.Value != nil {
if val, err = msg.Value.Encode(); err != nil {
return err
}
}
partitions := ps.msgs[msg.Topic]
if partitions == nil {
partitions = make(map[int32]*partitionSet)
ps.msgs[msg.Topic] = partitions
}
set := partitions[msg.Partition]
if set == nil {
set = &partitionSet{setToSend: new(MessageSet)}
partitions[msg.Partition] = set
}
set.msgs = append(set.msgs, msg)
msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
if msg.Timestamp.IsZero() {
msgToSend.Timestamp = time.Now()
} else {
msgToSend.Timestamp = msg.Timestamp
}
msgToSend.Version = 1
}
set.setToSend.addMessage(msgToSend)
size := producerMessageOverhead + len(key) + len(val)
set.bufferBytes += size
ps.bufferBytes += size
ps.bufferCount++
return nil
}
func (ps *produceSet) buildRequest() *ProduceRequest {
req := &ProduceRequest{
RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
req.Version = 2
}
for topic, partitionSet := range ps.msgs {
for partition, set := range partitionSet {
if ps.parent.conf.Producer.Compression == CompressionNone {
req.AddSet(topic, partition, set.setToSend)
} else {
// When compression is enabled, the entire set for each partition is compressed
// and sent as the payload of a single fake "message" with the appropriate codec
// set and no key. When the server sees a message with a compression codec, it
// decompresses the payload and treats the result as its message set.
payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry)
if err != nil {
Logger.Println(err) // if this happens, it's basically our fault.
panic(err)
}
compMsg := &Message{
Codec: ps.parent.conf.Producer.Compression,
Key: nil,
Value: payload,
Set: set.setToSend, // Provide the underlying message set for accurate metrics
}
if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
compMsg.Version = 1
compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp
}
req.AddMessage(topic, partition, compMsg)
}
}
}
return req
}
func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
for topic, partitionSet := range ps.msgs {
for partition, set := range partitionSet {
cb(topic, partition, set.msgs)
}
}
}
func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
if ps.msgs[topic] == nil {
return nil
}
set := ps.msgs[topic][partition]
if set == nil {
return nil
}
ps.bufferBytes -= set.bufferBytes
ps.bufferCount -= len(set.msgs)
delete(ps.msgs[topic], partition)
return set.msgs
}
func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
switch {
// Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
return true
// Would we overflow the size-limit of a compressed message-batch for this partition?
case ps.parent.conf.Producer.Compression != CompressionNone &&
ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
return true
// Would we overflow simply in number of messages?
case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
return true
default:
return false
}
}
func (ps *produceSet) readyToFlush() bool {
switch {
// If we don't have any messages, nothing else matters
case ps.empty():
return false
// If all three config values are 0, we always flush as-fast-as-possible
case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
return true
// If we've passed the message trigger-point
case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
return true
// If we've passed the byte trigger-point
case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
return true
default:
return false
}
}
func (ps *produceSet) empty() bool {
return ps.bufferCount == 0
}

260
vendor/github.com/Shopify/sarama/real_decoder.go generated vendored Normal file

@@ -0,0 +1,260 @@
package sarama
import (
"encoding/binary"
"math"
)
var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
var errInvalidStringLength = PacketDecodingError{"invalid string length"}
var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
type realDecoder struct {
raw []byte
off int
stack []pushDecoder
}
// primitives
func (rd *realDecoder) getInt8() (int8, error) {
if rd.remaining() < 1 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int8(rd.raw[rd.off])
rd.off++
return tmp, nil
}
func (rd *realDecoder) getInt16() (int16, error) {
if rd.remaining() < 2 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
rd.off += 2
return tmp, nil
}
func (rd *realDecoder) getInt32() (int32, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
return tmp, nil
}
func (rd *realDecoder) getInt64() (int64, error) {
if rd.remaining() < 8 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
rd.off += 8
return tmp, nil
}
func (rd *realDecoder) getArrayLength() (int, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
}
tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if tmp > rd.remaining() {
rd.off = len(rd.raw)
return -1, ErrInsufficientData
} else if tmp > 2*math.MaxUint16 {
return -1, errInvalidArrayLength
}
return tmp, nil
}
// collections
func (rd *realDecoder) getBytes() ([]byte, error) {
tmp, err := rd.getInt32()
if err != nil {
return nil, err
}
n := int(tmp)
switch {
case n < -1:
return nil, errInvalidByteSliceLength
case n == -1:
return nil, nil
case n == 0:
return make([]byte, 0), nil
case n > rd.remaining():
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
tmpStr := rd.raw[rd.off : rd.off+n]
rd.off += n
return tmpStr, nil
}
func (rd *realDecoder) getString() (string, error) {
tmp, err := rd.getInt16()
if err != nil {
return "", err
}
n := int(tmp)
switch {
case n < -1:
return "", errInvalidStringLength
case n == -1:
return "", nil
case n == 0:
return "", nil
case n > rd.remaining():
rd.off = len(rd.raw)
return "", ErrInsufficientData
}
tmpStr := string(rd.raw[rd.off : rd.off+n])
rd.off += n
return tmpStr, nil
}
func (rd *realDecoder) getInt32Array() ([]int32, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if rd.remaining() < 4*n {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, errInvalidArrayLength
}
ret := make([]int32, n)
for i := range ret {
ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
}
return ret, nil
}
func (rd *realDecoder) getInt64Array() ([]int64, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if rd.remaining() < 8*n {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, errInvalidArrayLength
}
ret := make([]int64, n)
for i := range ret {
ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
rd.off += 8
}
return ret, nil
}
func (rd *realDecoder) getStringArray() ([]string, error) {
if rd.remaining() < 4 {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
rd.off += 4
if n == 0 {
return nil, nil
}
if n < 0 {
return nil, errInvalidArrayLength
}
ret := make([]string, n)
for i := range ret {
str, err := rd.getString()
if err != nil {
return nil, err
}
ret[i] = str
}
return ret, nil
}
// subsets
func (rd *realDecoder) remaining() int {
return len(rd.raw) - rd.off
}
func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
if length < 0 {
return nil, errInvalidSubsetSize
} else if length > rd.remaining() {
rd.off = len(rd.raw)
return nil, ErrInsufficientData
}
start := rd.off
rd.off += length
return &realDecoder{raw: rd.raw[start:rd.off]}, nil
}
// stacks
func (rd *realDecoder) push(in pushDecoder) error {
in.saveOffset(rd.off)
reserve := in.reserveLength()
if rd.remaining() < reserve {
rd.off = len(rd.raw)
return ErrInsufficientData
}
rd.stack = append(rd.stack, in)
rd.off += reserve
return nil
}
func (rd *realDecoder) pop() error {
// this is go's ugly pop pattern (the inverse of append)
in := rd.stack[len(rd.stack)-1]
rd.stack = rd.stack[:len(rd.stack)-1]
return in.check(rd.off, rd.raw)
}
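Every getter above follows the same shape: bounds-check remaining(), read big-endian, advance off. A standalone sketch of that shape for an int16-length-prefixed string (not sarama code):

package main

import (
	"encoding/binary"
	"errors"
	"fmt"
)

// readString mirrors getString: int16 big-endian length, then the bytes.
func readString(buf []byte, off int) (string, int, error) {
	if len(buf)-off < 2 {
		return "", off, errors.New("insufficient data")
	}
	n := int(int16(binary.BigEndian.Uint16(buf[off:])))
	off += 2
	if n < 0 || len(buf)-off < n {
		return "", off, errors.New("invalid string length")
	}
	return string(buf[off : off+n]), off + n, nil
}

func main() {
	frame := []byte{0x00, 0x05, 'h', 'e', 'l', 'l', 'o'}
	s, _, err := readString(frame, 0)
	fmt.Println(s, err) // hello <nil>
}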

129
vendor/github.com/Shopify/sarama/real_encoder.go generated vendored Normal file

@@ -0,0 +1,129 @@
package sarama
import (
"encoding/binary"
"github.com/rcrowley/go-metrics"
)
type realEncoder struct {
raw []byte
off int
stack []pushEncoder
registry metrics.Registry
}
// primitives
func (re *realEncoder) putInt8(in int8) {
re.raw[re.off] = byte(in)
re.off++
}
func (re *realEncoder) putInt16(in int16) {
binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
re.off += 2
}
func (re *realEncoder) putInt32(in int32) {
binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
re.off += 4
}
func (re *realEncoder) putInt64(in int64) {
binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
re.off += 8
}
func (re *realEncoder) putArrayLength(in int) error {
re.putInt32(int32(in))
return nil
}
// collection
func (re *realEncoder) putRawBytes(in []byte) error {
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putBytes(in []byte) error {
if in == nil {
re.putInt32(-1)
return nil
}
re.putInt32(int32(len(in)))
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putString(in string) error {
re.putInt16(int16(len(in)))
copy(re.raw[re.off:], in)
re.off += len(in)
return nil
}
func (re *realEncoder) putStringArray(in []string) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
if err := re.putString(val); err != nil {
return err
}
}
return nil
}
func (re *realEncoder) putInt32Array(in []int32) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
re.putInt32(val)
}
return nil
}
func (re *realEncoder) putInt64Array(in []int64) error {
err := re.putArrayLength(len(in))
if err != nil {
return err
}
for _, val := range in {
re.putInt64(val)
}
return nil
}
func (re *realEncoder) offset() int {
return re.off
}
// stacks
func (re *realEncoder) push(in pushEncoder) {
in.saveOffset(re.off)
re.off += in.reserveLength()
re.stack = append(re.stack, in)
}
func (re *realEncoder) pop() error {
// this is go's ugly pop pattern (the inverse of append)
in := re.stack[len(re.stack)-1]
re.stack = re.stack[:len(re.stack)-1]
return in.run(re.off, re.raw)
}
// we do record metrics during the real encoder pass
func (re *realEncoder) metricRegistry() metrics.Registry {
return re.registry
}

119
vendor/github.com/Shopify/sarama/request.go generated vendored Normal file

@@ -0,0 +1,119 @@
package sarama
import (
"encoding/binary"
"fmt"
"io"
)
type protocolBody interface {
encoder
versionedDecoder
key() int16
version() int16
requiredVersion() KafkaVersion
}
type request struct {
correlationID int32
clientID string
body protocolBody
}
func (r *request) encode(pe packetEncoder) (err error) {
pe.push(&lengthField{})
pe.putInt16(r.body.key())
pe.putInt16(r.body.version())
pe.putInt32(r.correlationID)
err = pe.putString(r.clientID)
if err != nil {
return err
}
err = r.body.encode(pe)
if err != nil {
return err
}
return pe.pop()
}
func (r *request) decode(pd packetDecoder) (err error) {
var key int16
if key, err = pd.getInt16(); err != nil {
return err
}
var version int16
if version, err = pd.getInt16(); err != nil {
return err
}
if r.correlationID, err = pd.getInt32(); err != nil {
return err
}
if r.clientID, err = pd.getString(); err != nil {
return err
}
r.body = allocateBody(key, version)
if r.body == nil {
return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
}
return r.body.decode(pd, version)
}
func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
lengthBytes := make([]byte, 4)
if _, err := io.ReadFull(r, lengthBytes); err != nil {
return nil, bytesRead, err
}
bytesRead += len(lengthBytes)
length := int32(binary.BigEndian.Uint32(lengthBytes))
if length <= 4 || length > MaxRequestSize {
return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
}
encodedReq := make([]byte, length)
if _, err := io.ReadFull(r, encodedReq); err != nil {
return nil, bytesRead, err
}
bytesRead += len(encodedReq)
req = &request{}
if err := decode(encodedReq, req); err != nil {
return nil, bytesRead, err
}
return req, bytesRead, nil
}
func allocateBody(key, version int16) protocolBody {
switch key {
case 0:
return &ProduceRequest{}
case 1:
return &FetchRequest{}
case 2:
return &OffsetRequest{Version: version}
case 3:
return &MetadataRequest{}
case 8:
return &OffsetCommitRequest{Version: version}
case 9:
return &OffsetFetchRequest{}
case 10:
return &ConsumerMetadataRequest{}
case 11:
return &JoinGroupRequest{}
case 12:
return &HeartbeatRequest{}
case 13:
return &LeaveGroupRequest{}
case 14:
return &SyncGroupRequest{}
case 15:
return &DescribeGroupsRequest{}
case 16:
return &ListGroupsRequest{}
case 17:
return &SaslHandshakeRequest{}
case 18:
return &ApiVersionsRequest{}
}
return nil
}
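decodeRequest expects the standard Kafka envelope: a 4-byte length, then api key, version, correlation id, and a length-prefixed client id, followed by the body. A sketch of framing just that header with encoding/binary (values are arbitrary and no request body is included, so this is illustrative only; binary.Write to a bytes.Buffer never returns an error for fixed-size values, hence the ignored returns):

var body bytes.Buffer
binary.Write(&body, binary.BigEndian, int16(0)) // api key (0 = produce)
binary.Write(&body, binary.BigEndian, int16(0)) // api version
binary.Write(&body, binary.BigEndian, int32(1)) // correlation id
binary.Write(&body, binary.BigEndian, int16(4)) // client id length
body.WriteString("demo")

var frame bytes.Buffer
binary.Write(&frame, binary.BigEndian, int32(body.Len())) // 4-byte length prefix
body.WriteTo(&frame)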

21
vendor/github.com/Shopify/sarama/response_header.go generated vendored Normal file

@@ -0,0 +1,21 @@
package sarama
import "fmt"
type responseHeader struct {
length int32
correlationID int32
}
func (r *responseHeader) decode(pd packetDecoder) (err error) {
r.length, err = pd.getInt32()
if err != nil {
return err
}
if r.length <= 4 || r.length > MaxResponseSize {
return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
}
r.correlationID, err = pd.getInt32()
return err
}

99
vendor/github.com/Shopify/sarama/sarama.go generated vendored Normal file

@@ -0,0 +1,99 @@
/*
Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
SyncProducer can still sometimes be lost.
To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
and message sent on the wire; the Client provides higher-level metadata management that is shared between
the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
exactly with the protocol fields documented by Kafka at
https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
Metrics are exposed through the https://github.com/rcrowley/go-metrics library in a local registry.
Broker related metrics:
+----------------------------------------------+------------+---------------------------------------------------------------+
| Name | Type | Description |
+----------------------------------------------+------------+---------------------------------------------------------------+
| incoming-byte-rate | meter | Bytes/second read off all brokers |
| incoming-byte-rate-for-broker-<broker-id> | meter | Bytes/second read off a given broker |
| outgoing-byte-rate | meter | Bytes/second written off all brokers |
| outgoing-byte-rate-for-broker-<broker-id> | meter | Bytes/second written off a given broker |
| request-rate | meter | Requests/second sent to all brokers |
| request-rate-for-broker-<broker-id> | meter | Requests/second sent to a given broker |
| request-size | histogram | Distribution of the request size in bytes for all brokers |
| request-size-for-broker-<broker-id> | histogram | Distribution of the request size in bytes for a given broker |
| request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers |
| request-latency-in-ms-for-broker-<broker-id> | histogram | Distribution of the request latency in ms for a given broker |
| response-rate | meter | Responses/second received from all brokers |
| response-rate-for-broker-<broker-id> | meter | Responses/second received from a given broker |
| response-size | histogram | Distribution of the response size in bytes for all brokers |
| response-size-for-broker-<broker-id> | histogram | Distribution of the response size in bytes for a given broker |
+----------------------------------------------+------------+---------------------------------------------------------------+
Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
Producer related metrics:
+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
| Name | Type | Description |
+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
| batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
| batch-size-for-topic-<topic> | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
| record-send-rate | meter | Records/second sent to all topics |
| record-send-rate-for-topic-<topic> | meter | Records/second sent to a given topic |
| records-per-request | histogram | Distribution of the number of records sent per request for all topics |
| records-per-request-for-topic-<topic> | histogram | Distribution of the number of records sent per request for a given topic |
| compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
| compression-ratio-for-topic-<topic> | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
+-------------------------------------------+------------+--------------------------------------------------------------------------------------+
*/
package sarama
import (
"io/ioutil"
"log"
)
// Logger is the instance of a StdLogger interface that Sarama writes connection
// management events to. By default it is set to discard all log messages via ioutil.Discard,
// but you can set it to redirect wherever you want.
var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
// StdLogger is used to log error messages.
type StdLogger interface {
Print(v ...interface{})
Printf(format string, v ...interface{})
Println(v ...interface{})
}
// PanicHandler is called for recovering from panics spawned internally to the library (and thus
// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
var PanicHandler func(interface{})
// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
// to process.
var MaxRequestSize int32 = 100 * 1024 * 1024
// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
// protect the client from running out of memory. Please note that brokers do not have any natural limit on
// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
// (see https://issues.apache.org/jira/browse/KAFKA-2063).
var MaxResponseSize int32 = 100 * 1024 * 1024
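The package doc above notes that Logger defaults to discarding everything; the usual first step in an application is to point it somewhere visible (this assumes the standard library log and os packages are imported):

// Redirect Sarama's connection-management logs to stderr.
sarama.Logger = log.New(os.Stderr, "[Sarama] ", log.LstdFlags)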

33
vendor/github.com/Shopify/sarama/sasl_handshake_request.go generated vendored Normal file

@@ -0,0 +1,33 @@
package sarama
type SaslHandshakeRequest struct {
Mechanism string
}
func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.Mechanism); err != nil {
return err
}
return nil
}
func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
if r.Mechanism, err = pd.getString(); err != nil {
return err
}
return nil
}
func (r *SaslHandshakeRequest) key() int16 {
return 17
}
func (r *SaslHandshakeRequest) version() int16 {
return 0
}
func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
return V0_10_0_0
}

38
vendor/github.com/Shopify/sarama/sasl_handshake_response.go generated vendored Normal file

@@ -0,0 +1,38 @@
package sarama
type SaslHandshakeResponse struct {
Err KError
EnabledMechanisms []string
}
func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return pe.putStringArray(r.EnabledMechanisms)
}
func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
return err
}
return nil
}
func (r *SaslHandshakeResponse) key() int16 {
return 17
}
func (r *SaslHandshakeResponse) version() int16 {
return 0
}
func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
return V0_10_0_0
}

100
vendor/github.com/Shopify/sarama/sync_group_request.go generated vendored Normal file

@@ -0,0 +1,100 @@
package sarama
type SyncGroupRequest struct {
GroupId string
GenerationId int32
MemberId string
GroupAssignments map[string][]byte
}
func (r *SyncGroupRequest) encode(pe packetEncoder) error {
if err := pe.putString(r.GroupId); err != nil {
return err
}
pe.putInt32(r.GenerationId)
if err := pe.putString(r.MemberId); err != nil {
return err
}
if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
return err
}
for memberId, memberAssignment := range r.GroupAssignments {
if err := pe.putString(memberId); err != nil {
return err
}
if err := pe.putBytes(memberAssignment); err != nil {
return err
}
}
return nil
}
func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
if r.GroupId, err = pd.getString(); err != nil {
return
}
if r.GenerationId, err = pd.getInt32(); err != nil {
return
}
if r.MemberId, err = pd.getString(); err != nil {
return
}
n, err := pd.getArrayLength()
if err != nil {
return err
}
if n == 0 {
return nil
}
r.GroupAssignments = make(map[string][]byte)
for i := 0; i < n; i++ {
memberId, err := pd.getString()
if err != nil {
return err
}
memberAssignment, err := pd.getBytes()
if err != nil {
return err
}
r.GroupAssignments[memberId] = memberAssignment
}
return nil
}
func (r *SyncGroupRequest) key() int16 {
return 14
}
func (r *SyncGroupRequest) version() int16 {
return 0
}
func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
return V0_9_0_0
}
func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
if r.GroupAssignments == nil {
r.GroupAssignments = make(map[string][]byte)
}
r.GroupAssignments[memberId] = memberAssignment
}
func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
bin, err := encode(memberAssignment, nil)
if err != nil {
return err
}
r.AddGroupAssignment(memberId, bin)
return nil
}

41
vendor/github.com/Shopify/sarama/sync_group_response.go generated vendored Normal file

@@ -0,0 +1,41 @@
package sarama
type SyncGroupResponse struct {
Err KError
MemberAssignment []byte
}
func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
assignment := new(ConsumerGroupMemberAssignment)
err := decode(r.MemberAssignment, assignment)
return assignment, err
}
func (r *SyncGroupResponse) encode(pe packetEncoder) error {
pe.putInt16(int16(r.Err))
return pe.putBytes(r.MemberAssignment)
}
func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
kerr, err := pd.getInt16()
if err != nil {
return err
}
r.Err = KError(kerr)
r.MemberAssignment, err = pd.getBytes()
return
}
func (r *SyncGroupResponse) key() int16 {
return 14
}
func (r *SyncGroupResponse) version() int16 {
return 0
}
func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
return V0_9_0_0
}

164
vendor/github.com/Shopify/sarama/sync_producer.go generated vendored Normal file

@@ -0,0 +1,164 @@
package sarama
import "sync"
// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
// to avoid leaks: it may not be garbage-collected automatically when it passes out of scope.
//
// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
//
// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
// be set to true in its configuration.
type SyncProducer interface {
// SendMessage produces a given message, and returns only when it either has
// succeeded or failed to produce. It will return the partition and the offset
// of the produced message, or an error if the message failed to produce.
SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
// SendMessages produces a given set of messages, and returns only when all
// messages in the set have either succeeded or failed. Note that messages
// can succeed and fail individually; if some succeed and some fail,
// SendMessages will return an error.
SendMessages(msgs []*ProducerMessage) error
// Close shuts down the producer and waits for any buffered messages to be
// flushed. You must call this function before a producer object passes out of
// scope, as it may otherwise leak memory. You must call this before calling
// Close on the underlying client.
Close() error
}
type syncProducer struct {
producer *asyncProducer
wg sync.WaitGroup
}
// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
if config == nil {
config = NewConfig()
config.Producer.Return.Successes = true
}
if err := verifyProducerConfig(config); err != nil {
return nil, err
}
p, err := NewAsyncProducer(addrs, config)
if err != nil {
return nil, err
}
return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}
// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
if err := verifyProducerConfig(client.Config()); err != nil {
return nil, err
}
p, err := NewAsyncProducerFromClient(client)
if err != nil {
return nil, err
}
return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
}
func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
sp := &syncProducer{producer: p}
sp.wg.Add(2)
go withRecover(sp.handleSuccesses)
go withRecover(sp.handleErrors)
return sp
}
func verifyProducerConfig(config *Config) error {
if !config.Producer.Return.Errors {
return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
}
if !config.Producer.Return.Successes {
return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
}
return nil
}
func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
oldMetadata := msg.Metadata
defer func() {
msg.Metadata = oldMetadata
}()
expectation := make(chan *ProducerError, 1)
msg.Metadata = expectation
sp.producer.Input() <- msg
if err := <-expectation; err != nil {
return -1, -1, err.Err
}
return msg.Partition, msg.Offset, nil
}
func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
savedMetadata := make([]interface{}, len(msgs))
for i := range msgs {
savedMetadata[i] = msgs[i].Metadata
}
defer func() {
for i := range msgs {
msgs[i].Metadata = savedMetadata[i]
}
}()
expectations := make(chan chan *ProducerError, len(msgs))
go func() {
for _, msg := range msgs {
expectation := make(chan *ProducerError, 1)
msg.Metadata = expectation
sp.producer.Input() <- msg
expectations <- expectation
}
close(expectations)
}()
var errors ProducerErrors
for expectation := range expectations {
if err := <-expectation; err != nil {
errors = append(errors, err)
}
}
if len(errors) > 0 {
return errors
}
return nil
}
func (sp *syncProducer) handleSuccesses() {
defer sp.wg.Done()
for msg := range sp.producer.Successes() {
expectation := msg.Metadata.(chan *ProducerError)
expectation <- nil
}
}
func (sp *syncProducer) handleErrors() {
defer sp.wg.Done()
for err := range sp.producer.Errors() {
expectation := err.Msg.Metadata.(chan *ProducerError)
expectation <- err
}
}
func (sp *syncProducer) Close() error {
sp.producer.AsyncClose()
sp.wg.Wait()
return nil
}
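A minimal end-to-end use of the interface above, assuming a broker at localhost:9092 and an existing topic (both placeholders); passing a nil config enables Return.Successes automatically, as NewSyncProducer shows:

producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, nil)
if err != nil {
	log.Fatal(err)
}
defer producer.Close()

partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
	Topic: "my-topic",
	Value: sarama.StringEncoder("hello"),
})
if err != nil {
	log.Fatal(err)
}
log.Printf("stored at partition=%d offset=%d", partition, offset)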

150
vendor/github.com/Shopify/sarama/utils.go generated vendored Normal file

@@ -0,0 +1,150 @@
package sarama
import (
"bufio"
"net"
)
type none struct{}
// make []int32 sortable so we can sort partition numbers
type int32Slice []int32
func (slice int32Slice) Len() int {
return len(slice)
}
func (slice int32Slice) Less(i, j int) bool {
return slice[i] < slice[j]
}
func (slice int32Slice) Swap(i, j int) {
slice[i], slice[j] = slice[j], slice[i]
}
func dupInt32Slice(input []int32) []int32 {
ret := make([]int32, 0, len(input))
for _, val := range input {
ret = append(ret, val)
}
return ret
}
func withRecover(fn func()) {
defer func() {
handler := PanicHandler
if handler != nil {
if err := recover(); err != nil {
handler(err)
}
}
}()
fn()
}
func safeAsyncClose(b *Broker) {
tmp := b // local var prevents clobbering in goroutine
go withRecover(func() {
if connected, _ := tmp.Connected(); connected {
if err := tmp.Close(); err != nil {
Logger.Println("Error closing broker", tmp.ID(), ":", err)
}
}
})
}
// Encoder is a simple interface for any type that can be encoded as an array of bytes
// in order to be sent as the key or value of a Kafka message. Length() is provided as an
// optimization, and must return the same as len() on the result of Encode().
type Encoder interface {
Encode() ([]byte, error)
Length() int
}
// make strings and byte slices encodable for convenience so they can be used as keys
// and/or values in kafka messages
// StringEncoder implements the Encoder interface for Go strings so that they can be used
// as the Key or Value in a ProducerMessage.
type StringEncoder string
func (s StringEncoder) Encode() ([]byte, error) {
return []byte(s), nil
}
func (s StringEncoder) Length() int {
return len(s)
}
// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
// as the Key or Value in a ProducerMessage.
type ByteEncoder []byte
func (b ByteEncoder) Encode() ([]byte, error) {
return b, nil
}
func (b ByteEncoder) Length() int {
return len(b)
}
// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
// reads that trigger syscalls.
type bufConn struct {
net.Conn
buf *bufio.Reader
}
func newBufConn(conn net.Conn) *bufConn {
return &bufConn{
Conn: conn,
buf: bufio.NewReader(conn),
}
}
func (bc *bufConn) Read(b []byte) (n int, err error) {
return bc.buf.Read(b)
}
// KafkaVersion instances represent versions of the upstream Kafka broker.
type KafkaVersion struct {
// it's a struct rather than just typing the array directly to make it opaque and stop people
// generating their own arbitrary versions
version [4]uint
}
func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
return KafkaVersion{
version: [4]uint{major, minor, veryMinor, patch},
}
}
// IsAtLeast return true if and only if the version it is called on is
// greater than or equal to the version passed in:
// V1.IsAtLeast(V2) // false
// V2.IsAtLeast(V1) // true
func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
for i := range v.version {
if v.version[i] > other.version[i] {
return true
} else if v.version[i] < other.version[i] {
return false
}
}
return true
}
// Effective constants defining the supported Kafka versions.
var (
V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
V0_10_2_0 = newKafkaVersion(0, 10, 2, 0)
minVersion = V0_8_2_0
)
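These KafkaVersion constants feed sarama's Config.Version field, which gates wire-format decisions like the v2 produce requests seen earlier. A small sketch (Config.Version is the standard sarama field for this):

cfg := sarama.NewConfig()
cfg.Version = sarama.V0_10_0_0 // tell sarama the broker speaks at least 0.10
if cfg.Version.IsAtLeast(sarama.V0_9_0_0) {
	// features requiring 0.9+ wire formats are safe to use
}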

25
vendor/github.com/Xe/ln/doc.go generated vendored Normal file

@@ -0,0 +1,25 @@
/*
Package ln is the Natural Logger for Go
`ln` provides a simple interface to logging and metrics, and obviates the
need for purpose-built metrics packages like `go-metrics` in simple use
cases.
The design of `ln` centers around key-value pairs, which can be interpreted
on the fly by "Filters" to do things such as aggregate metrics and report
them to, say, Librato or statsd.
"Filters" are like WSGI, or Rack Middleware. They are run "top down"
and can abort an emitted log's output at any time, or continue to let
it through the chain. However, the interface is slightly different:
rather than encapsulating the chain with partial function application,
each filter defines an `Apply` function that takes the log event as an
argument and performs the filter's work only if the filter "applies" to
that event.
If `Apply` returns `false`, the iteration through the rest of the
filters is aborted, and the log is dropped from further processing.
*/
package ln

67
vendor/github.com/Xe/ln/filter.go generated vendored Normal file

@@ -0,0 +1,67 @@
package ln
import (
"context"
"io"
"sync"
)
// Filter interface for defining chain filters
type Filter interface {
Apply(ctx context.Context, e Event) bool
Run()
Close()
}
// FilterFunc allows simple functions to implement the Filter interface
type FilterFunc func(ctx context.Context, e Event) bool
// Apply implements the Filter interface
func (ff FilterFunc) Apply(ctx context.Context, e Event) bool {
return ff(ctx, e)
}
// Run implements the Filter interface
func (ff FilterFunc) Run() {}
// Close implements the Filter interface
func (ff FilterFunc) Close() {}
// WriterFilter implements a filter, which arbitrarily writes to an io.Writer
type WriterFilter struct {
sync.Mutex
Out io.Writer
Formatter Formatter
}
// NewWriterFilter creates a filter to add to the chain
func NewWriterFilter(out io.Writer, format Formatter) *WriterFilter {
if format == nil {
format = DefaultFormatter
}
return &WriterFilter{
Out: out,
Formatter: format,
}
}
// Apply implements the Filter interface
func (w *WriterFilter) Apply(ctx context.Context, e Event) bool {
output, err := w.Formatter.Format(ctx, e)
if err == nil {
w.Lock()
w.Out.Write(output)
w.Unlock()
}
return true
}
// Run implements the Filter interface
func (w *WriterFilter) Run() {}
// Close implements the Filter interface
func (w *WriterFilter) Close() {}
// NilFilter is safe to return as a Filter, but does nothing
var NilFilter = FilterFunc(func(_ context.Context, e Event) bool { return true })
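Since filters run top-down and a false return aborts the chain, a drop filter must be prepended ahead of the default WriterFilter to take effect. A sketch using the exported pieces above (the "debug" key is an arbitrary example):

drop := ln.FilterFunc(func(_ context.Context, e ln.Event) bool {
	return e.Data["debug"] != true // returning false drops the event before it is written
})
ln.DefaultLogger.Filters = append([]ln.Filter{drop}, ln.DefaultLogger.Filters...)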

111
vendor/github.com/Xe/ln/formatter.go generated vendored Normal file

@@ -0,0 +1,111 @@
package ln
import (
"bytes"
"context"
"fmt"
"time"
)
var (
// DefaultTimeFormat represents the way in which time will be formatted by default
DefaultTimeFormat = time.RFC3339
)
// Formatter defines the formatting of events
type Formatter interface {
Format(ctx context.Context, e Event) ([]byte, error)
}
// DefaultFormatter is the default way in which to format events
var DefaultFormatter Formatter
func init() {
DefaultFormatter = NewTextFormatter()
}
// TextFormatter formats events as key value pairs.
// Any remaining text not wrapped in an instance of `F` will be
// placed at the end.
type TextFormatter struct {
TimeFormat string
}
// NewTextFormatter returns a Formatter that outputs as text.
func NewTextFormatter() Formatter {
return &TextFormatter{TimeFormat: DefaultTimeFormat}
}
// Format implements the Formatter interface
func (t *TextFormatter) Format(_ context.Context, e Event) ([]byte, error) {
var writer bytes.Buffer
writer.WriteString("time=\"")
writer.WriteString(e.Time.Format(t.TimeFormat))
writer.WriteString("\"")
keys := make([]string, len(e.Data))
i := 0
for k := range e.Data {
keys[i] = k
i++
}
for _, k := range keys {
v := e.Data[k]
writer.WriteByte(' ')
if shouldQuote(k) {
writer.WriteString(fmt.Sprintf("%q", k))
} else {
writer.WriteString(k)
}
writer.WriteByte('=')
switch v.(type) {
case string:
vs, _ := v.(string)
if shouldQuote(vs) {
fmt.Fprintf(&writer, "%q", vs)
} else {
writer.WriteString(vs)
}
case error:
tmperr, _ := v.(error)
es := tmperr.Error()
if shouldQuote(es) {
fmt.Fprintf(&writer, "%q", es)
} else {
writer.WriteString(es)
}
case time.Time:
tmptime, _ := v.(time.Time)
writer.WriteString(tmptime.Format(time.RFC3339))
default:
fmt.Fprint(&writer, v)
}
}
if len(e.Message) > 0 {
fmt.Fprintf(&writer, " _msg=%q", e.Message)
}
writer.WriteByte('\n')
return writer.Bytes(), nil
}
func shouldQuote(s string) bool {
for _, b := range s {
if !((b >= 'A' && b <= 'Z') ||
(b >= 'a' && b <= 'z') ||
(b >= '0' && b <= '9') ||
(b == '-' || b == '.' || b == '#' ||
b == '/' || b == '_')) {
return true
}
}
return false
}

175
vendor/github.com/Xe/ln/logger.go generated vendored Normal file

@@ -0,0 +1,175 @@
package ln
import (
"context"
"os"
"time"
"github.com/pkg/errors"
)
// Logger holds the current priority and list of filters
type Logger struct {
Filters []Filter
}
// DefaultLogger is the default implementation of Logger
var DefaultLogger *Logger
func init() {
var defaultFilters []Filter
// Default to STDOUT for logging, but allow LN_OUT to change it.
out := os.Stdout
if os.Getenv("LN_OUT") == "<stderr>" {
out = os.Stderr
}
defaultFilters = append(defaultFilters, NewWriterFilter(out, nil))
DefaultLogger = &Logger{
Filters: defaultFilters,
}
}
// F is a key-value mapping for structured data.
type F map[string]interface{}
// Extend concatenates one F with one or many Fer instances.
func (f F) Extend(other ...Fer) {
for _, ff := range other {
for k, v := range ff.F() {
f[k] = v
}
}
}
// F makes F an Fer
func (f F) F() F {
return f
}
// Fer allows any type to add fields to the structured logging key->value pairs.
type Fer interface {
F() F
}
// Event represents an event
type Event struct {
Time time.Time
Data F
Message string
}
// Log is the generic logging method.
func (l *Logger) Log(ctx context.Context, xs ...Fer) {
event := Event{Time: time.Now()}
addF := func(bf F) {
if event.Data == nil {
event.Data = bf
} else {
for k, v := range bf {
event.Data[k] = v
}
}
}
for _, f := range xs {
addF(f.F())
}
if os.Getenv("LN_DEBUG_ALL_EVENTS") == "1" {
frame := callersFrame()
if event.Data == nil {
event.Data = make(F)
}
event.Data["_lineno"] = frame.lineno
event.Data["_function"] = frame.function
event.Data["_filename"] = frame.filename
}
l.filter(ctx, event)
}
func (l *Logger) filter(ctx context.Context, e Event) {
for _, f := range l.Filters {
if !f.Apply(ctx, e) {
return
}
}
}
// Error logs an error and information about the context of said error.
func (l *Logger) Error(ctx context.Context, err error, xs ...Fer) {
data := F{}
frame := callersFrame()
data["_lineno"] = frame.lineno
data["_function"] = frame.function
data["_filename"] = frame.filename
data["err"] = err
cause := errors.Cause(err)
if cause != nil {
data["cause"] = cause.Error()
}
xs = append(xs, data)
l.Log(ctx, xs...)
}
// Fatal logs this set of values, then exits with status code 1.
func (l *Logger) Fatal(ctx context.Context, xs ...Fer) {
xs = append(xs, F{"fatal": true})
l.Log(ctx, xs...)
os.Exit(1)
}
// FatalErr combines Fatal and Error.
func (l *Logger) FatalErr(ctx context.Context, err error, xs ...Fer) {
xs = append(xs, F{"fatal": true})
data := F{}
frame := callersFrame()
data["_lineno"] = frame.lineno
data["_function"] = frame.function
data["_filename"] = frame.filename
data["err"] = err
cause := errors.Cause(err)
if cause != nil {
data["cause"] = cause.Error()
}
xs = append(xs, data)
l.Log(ctx, xs...)
os.Exit(1)
}
// Default Implementation
// Log is the generic logging method.
func Log(ctx context.Context, xs ...Fer) {
DefaultLogger.Log(ctx, xs...)
}
// Error logs an error and information about the context of said error.
func Error(ctx context.Context, err error, xs ...Fer) {
DefaultLogger.Error(ctx, err, xs...)
}
// Fatal logs this set of values, then exits with status code 1.
func Fatal(ctx context.Context, xs ...Fer) {
DefaultLogger.Fatal(ctx, xs...)
}
// FatalErr combines Fatal and Error.
func FatalErr(ctx context.Context, err error, xs ...Fer) {
DefaultLogger.FatalErr(ctx, err, xs...)
}
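Typical call sites pass one or more F maps (any Fer works); Error additionally records the call frame and the error's cause. For example (doWork is a placeholder):

ctx := context.Background()
ln.Log(ctx, ln.F{"event": "user_signup", "user_id": 42})
if err := doWork(); err != nil {
	ln.Error(ctx, err, ln.F{"event": "work_failed"})
}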

44
vendor/github.com/Xe/ln/stack.go generated vendored Normal file

@@ -0,0 +1,44 @@
package ln
import (
"os"
"runtime"
"strings"
)
type frame struct {
filename string
function string
lineno int
}
// skips 3 frames (callersFrame itself, the Logger method, and the
// package-level wrapper) so the reported location is the user's call site.
func callersFrame() frame {
var out frame
pc, file, line, ok := runtime.Caller(3)
if !ok {
return out
}
srcLoc := strings.LastIndex(file, "/src/")
if srcLoc >= 0 {
file = file[srcLoc+5:]
}
out.filename = file
out.function = functionName(pc)
out.lineno = line
return out
}
func functionName(pc uintptr) string {
fn := runtime.FuncForPC(pc)
if fn == nil {
return "???"
}
name := fn.Name()
beg := strings.LastIndex(name, string(os.PathSeparator))
return name[beg+1:]
// end := strings.LastIndex(name, string(os.PathSeparator))
// return name[end+1 : len(name)]
}


@@ -0,0 +1,142 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
const (
UNKNOWN_APPLICATION_EXCEPTION = 0
UNKNOWN_METHOD = 1
INVALID_MESSAGE_TYPE_EXCEPTION = 2
WRONG_METHOD_NAME = 3
BAD_SEQUENCE_ID = 4
MISSING_RESULT = 5
INTERNAL_ERROR = 6
PROTOCOL_ERROR = 7
)
// Application level Thrift exception
type TApplicationException interface {
TException
TypeId() int32
Read(iprot TProtocol) (TApplicationException, error)
Write(oprot TProtocol) error
}
type tApplicationException struct {
message string
type_ int32
}
func (e tApplicationException) Error() string {
return e.message
}
func NewTApplicationException(type_ int32, message string) TApplicationException {
return &tApplicationException{message, type_}
}
func (p *tApplicationException) TypeId() int32 {
return p.type_
}
func (p *tApplicationException) Read(iprot TProtocol) (TApplicationException, error) {
_, err := iprot.ReadStructBegin()
if err != nil {
return nil, err
}
message := ""
type_ := int32(UNKNOWN_APPLICATION_EXCEPTION)
for {
_, ttype, id, err := iprot.ReadFieldBegin()
if err != nil {
return nil, err
}
if ttype == STOP {
break
}
switch id {
case 1:
if ttype == STRING {
if message, err = iprot.ReadString(); err != nil {
return nil, err
}
} else {
if err = SkipDefaultDepth(iprot, ttype); err != nil {
return nil, err
}
}
case 2:
if ttype == I32 {
if type_, err = iprot.ReadI32(); err != nil {
return nil, err
}
} else {
if err = SkipDefaultDepth(iprot, ttype); err != nil {
return nil, err
}
}
default:
if err = SkipDefaultDepth(iprot, ttype); err != nil {
return nil, err
}
}
if err = iprot.ReadFieldEnd(); err != nil {
return nil, err
}
}
return NewTApplicationException(type_, message), iprot.ReadStructEnd()
}
func (p *tApplicationException) Write(oprot TProtocol) (err error) {
	err = oprot.WriteStructBegin("TApplicationException")
	if err != nil {
		return
	}
if len(p.Error()) > 0 {
err = oprot.WriteFieldBegin("message", STRING, 1)
if err != nil {
return
}
err = oprot.WriteString(p.Error())
if err != nil {
return
}
err = oprot.WriteFieldEnd()
if err != nil {
return
}
}
err = oprot.WriteFieldBegin("type", I32, 2)
if err != nil {
return
}
err = oprot.WriteI32(p.type_)
if err != nil {
return
}
err = oprot.WriteFieldEnd()
if err != nil {
return
}
err = oprot.WriteFieldStop()
if err != nil {
return
}
err = oprot.WriteStructEnd()
return
}
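// For illustration, a server typically replies to an unroutable call with one
// of the constants above (a sketch; oprot is assumed to be a live TProtocol,
// and the EXCEPTION message type is defined elsewhere in this package):
//
//	exc := NewTApplicationException(UNKNOWN_METHOD, "no handler for: ping")
//	if err := exc.Write(oprot); err != nil {
//		// handle transport error
//	}
//
// The client side then recovers it with exc.Read(iprot) after reading an
// EXCEPTION message header.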

View File

@ -0,0 +1,514 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"io"
"math"
)
type TBinaryProtocol struct {
trans TRichTransport
origTransport TTransport
reader io.Reader
writer io.Writer
strictRead bool
strictWrite bool
buffer [64]byte
}
type TBinaryProtocolFactory struct {
strictRead bool
strictWrite bool
}
func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol {
return NewTBinaryProtocol(t, false, true)
}
func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol {
p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite}
if et, ok := t.(TRichTransport); ok {
p.trans = et
} else {
p.trans = NewTRichTransport(t)
}
p.reader = p.trans
p.writer = p.trans
return p
}
func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory {
return NewTBinaryProtocolFactory(false, true)
}
func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory {
return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite}
}
func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol {
return NewTBinaryProtocol(t, p.strictRead, p.strictWrite)
}
/**
* Writing Methods
*/
func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
if p.strictWrite {
version := uint32(VERSION_1) | uint32(typeId)
e := p.WriteI32(int32(version))
if e != nil {
return e
}
e = p.WriteString(name)
if e != nil {
return e
}
e = p.WriteI32(seqId)
return e
} else {
e := p.WriteString(name)
if e != nil {
return e
}
e = p.WriteByte(int8(typeId))
if e != nil {
return e
}
e = p.WriteI32(seqId)
return e
}
}
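// For illustration, assuming VERSION_1 = 0x80010000 (defined elsewhere in this
// package), the strict header written above lays out on the wire as:
//
//	[4 bytes] 0x80 0x01 0x00 TT   version word; low byte TT is the message type
//	[4 bytes] name length, then the name bytes
//	[4 bytes] sequence id
//
// Non-strict mode instead writes the name, a single type byte, then the seqid.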
func (p *TBinaryProtocol) WriteMessageEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteStructBegin(name string) error {
return nil
}
func (p *TBinaryProtocol) WriteStructEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
e := p.WriteByte(int8(typeId))
if e != nil {
return e
}
e = p.WriteI16(id)
return e
}
func (p *TBinaryProtocol) WriteFieldEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteFieldStop() error {
e := p.WriteByte(STOP)
return e
}
func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
e := p.WriteByte(int8(keyType))
if e != nil {
return e
}
e = p.WriteByte(int8(valueType))
if e != nil {
return e
}
e = p.WriteI32(int32(size))
return e
}
func (p *TBinaryProtocol) WriteMapEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error {
e := p.WriteByte(int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(int32(size))
return e
}
func (p *TBinaryProtocol) WriteListEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error {
e := p.WriteByte(int8(elemType))
if e != nil {
return e
}
e = p.WriteI32(int32(size))
return e
}
func (p *TBinaryProtocol) WriteSetEnd() error {
return nil
}
func (p *TBinaryProtocol) WriteBool(value bool) error {
if value {
return p.WriteByte(1)
}
return p.WriteByte(0)
}
func (p *TBinaryProtocol) WriteByte(value int8) error {
e := p.trans.WriteByte(byte(value))
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI16(value int16) error {
v := p.buffer[0:2]
binary.BigEndian.PutUint16(v, uint16(value))
_, e := p.writer.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI32(value int32) error {
v := p.buffer[0:4]
binary.BigEndian.PutUint32(v, uint32(value))
_, e := p.writer.Write(v)
return NewTProtocolException(e)
}
func (p *TBinaryProtocol) WriteI64(value int64) error {
v := p.buffer[0:8]
binary.BigEndian.PutUint64(v, uint64(value))
_, err := p.writer.Write(v)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteDouble(value float64) error {
return p.WriteI64(int64(math.Float64bits(value)))
}
func (p *TBinaryProtocol) WriteString(value string) error {
e := p.WriteI32(int32(len(value)))
if e != nil {
return e
}
_, err := p.trans.WriteString(value)
return NewTProtocolException(err)
}
func (p *TBinaryProtocol) WriteBinary(value []byte) error {
e := p.WriteI32(int32(len(value)))
if e != nil {
return e
}
_, err := p.writer.Write(value)
return NewTProtocolException(err)
}
/**
* Reading methods
*/
func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
size, e := p.ReadI32()
if e != nil {
return "", typeId, 0, NewTProtocolException(e)
}
if size < 0 {
typeId = TMessageType(size & 0x0ff)
version := int64(int64(size) & VERSION_MASK)
if version != VERSION_1 {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin"))
}
name, e = p.ReadString()
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
seqId, e = p.ReadI32()
if e != nil {
return name, typeId, seqId, NewTProtocolException(e)
}
return name, typeId, seqId, nil
}
if p.strictRead {
return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin"))
}
name, e2 := p.readStringBody(size)
if e2 != nil {
return name, typeId, seqId, e2
}
b, e3 := p.ReadByte()
if e3 != nil {
return name, typeId, seqId, e3
}
typeId = TMessageType(b)
seqId, e4 := p.ReadI32()
if e4 != nil {
return name, typeId, seqId, e4
}
return name, typeId, seqId, nil
}
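// The dispatch above works because a strict header's first i32 is the version
// word, whose sign bit is set (assuming VERSION_1 = 0x80010000), so size < 0
// means "versioned message"; a non-strict message starts with a non-negative
// string length instead.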
func (p *TBinaryProtocol) ReadMessageEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) {
return
}
func (p *TBinaryProtocol) ReadStructEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) {
t, err := p.ReadByte()
typeId = TType(t)
if err != nil {
return name, typeId, seqId, err
}
if t != STOP {
seqId, err = p.ReadI16()
}
return name, typeId, seqId, err
}
func (p *TBinaryProtocol) ReadFieldEnd() error {
return nil
}
var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length"))
func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) {
k, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
kType = TType(k)
v, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
vType = TType(v)
size32, e := p.ReadI32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return kType, vType, size, nil
}
func (p *TBinaryProtocol) ReadMapEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) {
b, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return
}
func (p *TBinaryProtocol) ReadListEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) {
b, e := p.ReadByte()
if e != nil {
err = NewTProtocolException(e)
return
}
elemType = TType(b)
size32, e := p.ReadI32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
return elemType, size, nil
}
func (p *TBinaryProtocol) ReadSetEnd() error {
return nil
}
func (p *TBinaryProtocol) ReadBool() (bool, error) {
b, e := p.ReadByte()
v := true
if b != 1 {
v = false
}
return v, e
}
func (p *TBinaryProtocol) ReadByte() (int8, error) {
v, err := p.trans.ReadByte()
return int8(v), err
}
func (p *TBinaryProtocol) ReadI16() (value int16, err error) {
buf := p.buffer[0:2]
err = p.readAll(buf)
value = int16(binary.BigEndian.Uint16(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI32() (value int32, err error) {
buf := p.buffer[0:4]
err = p.readAll(buf)
value = int32(binary.BigEndian.Uint32(buf))
return value, err
}
func (p *TBinaryProtocol) ReadI64() (value int64, err error) {
buf := p.buffer[0:8]
err = p.readAll(buf)
value = int64(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadDouble() (value float64, err error) {
buf := p.buffer[0:8]
err = p.readAll(buf)
value = math.Float64frombits(binary.BigEndian.Uint64(buf))
return value, err
}
func (p *TBinaryProtocol) ReadString() (value string, err error) {
size, e := p.ReadI32()
if e != nil {
return "", e
}
if size < 0 {
err = invalidDataLength
return
}
return p.readStringBody(size)
}
func (p *TBinaryProtocol) ReadBinary() ([]byte, error) {
size, e := p.ReadI32()
if e != nil {
return nil, e
}
if size < 0 {
return nil, invalidDataLength
}
if uint64(size) > p.trans.RemainingBytes() {
return nil, invalidDataLength
}
isize := int(size)
buf := make([]byte, isize)
_, err := io.ReadFull(p.trans, buf)
return buf, NewTProtocolException(err)
}
func (p *TBinaryProtocol) Flush() (err error) {
return NewTProtocolException(p.trans.Flush())
}
func (p *TBinaryProtocol) Skip(fieldType TType) (err error) {
return SkipDefaultDepth(p, fieldType)
}
func (p *TBinaryProtocol) Transport() TTransport {
return p.origTransport
}
func (p *TBinaryProtocol) readAll(buf []byte) error {
_, err := io.ReadFull(p.reader, buf)
return NewTProtocolException(err)
}
const readLimit = 32768
func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) {
if size < 0 {
return "", nil
}
if uint64(size) > p.trans.RemainingBytes() {
return "", invalidDataLength
}
var (
buf bytes.Buffer
e error
b []byte
)
switch {
case int(size) <= len(p.buffer):
b = p.buffer[:size] // avoids allocation for small reads
case int(size) < readLimit:
b = make([]byte, size)
default:
b = make([]byte, readLimit)
}
for size > 0 {
_, e = io.ReadFull(p.trans, b)
buf.Write(b)
if e != nil {
break
}
size -= readLimit
if size < readLimit && size > 0 {
b = b[:size]
}
}
return buf.String(), NewTProtocolException(e)
}
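// For illustration of the chunking above: a 70000-byte string with
// readLimit = 32768 is consumed in reads of 32768, 32768, and 4464 bytes, the
// final iteration shrinking b to the 4464-byte remainder before the loop exits.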

View File

@ -0,0 +1,91 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
)
type TBufferedTransportFactory struct {
size int
}
type TBufferedTransport struct {
bufio.ReadWriter
tp TTransport
}
func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
return NewTBufferedTransport(trans, p.size), nil
}
func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory {
return &TBufferedTransportFactory{size: bufferSize}
}
func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport {
return &TBufferedTransport{
ReadWriter: bufio.ReadWriter{
Reader: bufio.NewReaderSize(trans, bufferSize),
Writer: bufio.NewWriterSize(trans, bufferSize),
},
tp: trans,
}
}
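// For illustration, wrapping an existing transport with 8 KiB buffers
// (a sketch; sock is assumed to be an already-opened TTransport):
//
//	buffered := NewTBufferedTransport(sock, 8192)
//	proto := NewTBinaryProtocolTransport(buffered)
//	// writes accumulate in memory until buffered.Flush() pushes them down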
func (p *TBufferedTransport) IsOpen() bool {
return p.tp.IsOpen()
}
func (p *TBufferedTransport) Open() (err error) {
return p.tp.Open()
}
func (p *TBufferedTransport) Close() (err error) {
return p.tp.Close()
}
func (p *TBufferedTransport) Read(b []byte) (int, error) {
n, err := p.ReadWriter.Read(b)
if err != nil {
p.ReadWriter.Reader.Reset(p.tp)
}
return n, err
}
func (p *TBufferedTransport) Write(b []byte) (int, error) {
n, err := p.ReadWriter.Write(b)
if err != nil {
p.ReadWriter.Writer.Reset(p.tp)
}
return n, err
}
func (p *TBufferedTransport) Flush() error {
if err := p.ReadWriter.Flush(); err != nil {
p.ReadWriter.Writer.Reset(p.tp)
return err
}
return p.tp.Flush()
}
func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) {
return p.tp.RemainingBytes()
}

View File

@ -0,0 +1,32 @@
// +build go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "context"
type mockProcessor struct {
ProcessFunc func(in, out TProtocol) (bool, TException)
}
func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
return m.ProcessFunc(in, out)
}

View File

@ -0,0 +1,32 @@
// +build !go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "golang.org/x/net/context"
type mockProcessor struct {
ProcessFunc func(in, out TProtocol) (bool, TException)
}
func (m *mockProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
return m.ProcessFunc(in, out)
}

View File

@ -0,0 +1,815 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"encoding/binary"
"fmt"
"io"
"math"
)
const (
COMPACT_PROTOCOL_ID = 0x082
COMPACT_VERSION = 1
COMPACT_VERSION_MASK = 0x1f
COMPACT_TYPE_MASK = 0x0E0
COMPACT_TYPE_BITS = 0x07
COMPACT_TYPE_SHIFT_AMOUNT = 5
)
type tCompactType byte
const (
COMPACT_BOOLEAN_TRUE = 0x01
COMPACT_BOOLEAN_FALSE = 0x02
COMPACT_BYTE = 0x03
COMPACT_I16 = 0x04
COMPACT_I32 = 0x05
COMPACT_I64 = 0x06
COMPACT_DOUBLE = 0x07
COMPACT_BINARY = 0x08
COMPACT_LIST = 0x09
COMPACT_SET = 0x0A
COMPACT_MAP = 0x0B
COMPACT_STRUCT = 0x0C
)
var (
ttypeToCompactType map[TType]tCompactType
)
func init() {
ttypeToCompactType = map[TType]tCompactType{
STOP: STOP,
BOOL: COMPACT_BOOLEAN_TRUE,
BYTE: COMPACT_BYTE,
I16: COMPACT_I16,
I32: COMPACT_I32,
I64: COMPACT_I64,
DOUBLE: COMPACT_DOUBLE,
STRING: COMPACT_BINARY,
LIST: COMPACT_LIST,
SET: COMPACT_SET,
MAP: COMPACT_MAP,
STRUCT: COMPACT_STRUCT,
}
}
type TCompactProtocolFactory struct{}
func NewTCompactProtocolFactory() *TCompactProtocolFactory {
return &TCompactProtocolFactory{}
}
func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return NewTCompactProtocol(trans)
}
type TCompactProtocol struct {
trans TRichTransport
origTransport TTransport
	// Used to keep track of the last field for the current and previous structs,
	// so we can compute field-id deltas.
lastField []int
lastFieldId int
// If we encounter a boolean field begin, save the TField here so it can
// have the value incorporated.
booleanFieldName string
booleanFieldId int16
booleanFieldPending bool
// If we read a field header, and it's a boolean field, save the boolean
// value here so that readBool can use it.
boolValue bool
boolValueIsNotNull bool
buffer [64]byte
}
// Create a TCompactProtocol given a TTransport
func NewTCompactProtocol(trans TTransport) *TCompactProtocol {
p := &TCompactProtocol{origTransport: trans, lastField: []int{}}
if et, ok := trans.(TRichTransport); ok {
p.trans = et
} else {
p.trans = NewTRichTransport(trans)
}
return p
}
//
// Public Writing methods.
//
// Write a message header to the wire. Compact Protocol messages contain the
// protocol version so we can migrate forwards in the future if need be.
func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
err := p.writeByteDirect(COMPACT_PROTOCOL_ID)
if err != nil {
return NewTProtocolException(err)
}
err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK))
if err != nil {
return NewTProtocolException(err)
}
_, err = p.writeVarint32(seqid)
if err != nil {
return NewTProtocolException(err)
}
e := p.WriteString(name)
return e
}
func (p *TCompactProtocol) WriteMessageEnd() error { return nil }
// Write a struct begin. This doesn't actually put anything on the wire. We
// use it as an opportunity to put special placeholder markers on the field
// stack so we can get the field id deltas correct.
func (p *TCompactProtocol) WriteStructBegin(name string) error {
p.lastField = append(p.lastField, p.lastFieldId)
p.lastFieldId = 0
return nil
}
// Write a struct end. This doesn't actually put anything on the wire. We use
// this as an opportunity to pop the last field from the current struct off
// of the field stack.
func (p *TCompactProtocol) WriteStructEnd() error {
p.lastFieldId = p.lastField[len(p.lastField)-1]
p.lastField = p.lastField[:len(p.lastField)-1]
return nil
}
func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
if typeId == BOOL {
// we want to possibly include the value, so we'll wait.
p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true
return nil
}
_, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF)
return NewTProtocolException(err)
}
// The workhorse of writeFieldBegin. It has the option of doing a
// 'type override' of the type header. This is used specifically in the
// boolean field case.
func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) {
// if there's a type override, use that.
var typeToWrite byte
if typeOverride == 0xFF {
typeToWrite = byte(p.getCompactType(typeId))
} else {
typeToWrite = typeOverride
}
// check if we can use delta encoding for the field id
fieldId := int(id)
written := 0
if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 {
// write them together
err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite)
if err != nil {
return 0, err
}
} else {
// write them separate
err := p.writeByteDirect(typeToWrite)
if err != nil {
return 0, err
}
err = p.WriteI16(id)
written = 1 + 2
if err != nil {
return 0, err
}
}
p.lastFieldId = fieldId
return written, nil
}
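// For illustration of the delta encoding above: writing i32 fields (compact
// type 0x05) with ids 1, 2, 5 emits the single bytes 0x15, 0x15, 0x35: the
// high nibble is the id delta (1, 1, 3) and the low nibble the type. Deltas
// of zero or greater than 15 fall back to a type byte followed by a zigzag
// varint field id.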
func (p *TCompactProtocol) WriteFieldEnd() error { return nil }
func (p *TCompactProtocol) WriteFieldStop() error {
err := p.writeByteDirect(STOP)
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
if size == 0 {
err := p.writeByteDirect(0)
return NewTProtocolException(err)
}
_, err := p.writeVarint32(int32(size))
if err != nil {
return NewTProtocolException(err)
}
err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType)))
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteMapEnd() error { return nil }
// Write a list header.
func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error {
_, err := p.writeCollectionBegin(elemType, size)
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteListEnd() error { return nil }
// Write a set header.
func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error {
_, err := p.writeCollectionBegin(elemType, size)
return NewTProtocolException(err)
}
func (p *TCompactProtocol) WriteSetEnd() error { return nil }
func (p *TCompactProtocol) WriteBool(value bool) error {
v := byte(COMPACT_BOOLEAN_FALSE)
if value {
v = byte(COMPACT_BOOLEAN_TRUE)
}
if p.booleanFieldPending {
// we haven't written the field header yet
_, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v)
p.booleanFieldPending = false
return NewTProtocolException(err)
}
// we're not part of a field, so just write the value.
err := p.writeByteDirect(v)
return NewTProtocolException(err)
}
// Write a byte. Nothing to see here!
func (p *TCompactProtocol) WriteByte(value int8) error {
err := p.writeByteDirect(byte(value))
return NewTProtocolException(err)
}
// Write an I16 as a zigzag varint.
func (p *TCompactProtocol) WriteI16(value int16) error {
_, err := p.writeVarint32(p.int32ToZigzag(int32(value)))
return NewTProtocolException(err)
}
// Write an i32 as a zigzag varint.
func (p *TCompactProtocol) WriteI32(value int32) error {
_, err := p.writeVarint32(p.int32ToZigzag(value))
return NewTProtocolException(err)
}
// Write an i64 as a zigzag varint.
func (p *TCompactProtocol) WriteI64(value int64) error {
_, err := p.writeVarint64(p.int64ToZigzag(value))
return NewTProtocolException(err)
}
// Write a double to the wire as 8 bytes.
func (p *TCompactProtocol) WriteDouble(value float64) error {
buf := p.buffer[0:8]
binary.LittleEndian.PutUint64(buf, math.Float64bits(value))
_, err := p.trans.Write(buf)
return NewTProtocolException(err)
}
// Write a string to the wire with a varint size preceding.
func (p *TCompactProtocol) WriteString(value string) error {
_, e := p.writeVarint32(int32(len(value)))
if e != nil {
return NewTProtocolException(e)
}
_, e = p.trans.WriteString(value)
return e
}
// Write a byte array, using a varint for the size.
func (p *TCompactProtocol) WriteBinary(bin []byte) error {
_, e := p.writeVarint32(int32(len(bin)))
if e != nil {
return NewTProtocolException(e)
}
if len(bin) > 0 {
_, e = p.trans.Write(bin)
return NewTProtocolException(e)
}
return nil
}
//
// Reading methods.
//
// Read a message header.
func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
protocolId, err := p.readByteDirect()
if err != nil {
return
}
if protocolId != COMPACT_PROTOCOL_ID {
e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId)
return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e)
}
versionAndType, err := p.readByteDirect()
if err != nil {
return
}
version := versionAndType & COMPACT_VERSION_MASK
typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS)
if version != COMPACT_VERSION {
e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version)
err = NewTProtocolExceptionWithType(BAD_VERSION, e)
return
}
seqId, e := p.readVarint32()
if e != nil {
err = NewTProtocolException(e)
return
}
name, err = p.ReadString()
return
}
func (p *TCompactProtocol) ReadMessageEnd() error { return nil }
// Read a struct begin. There's nothing on the wire for this, but it is our
// opportunity to push a new struct begin marker onto the field stack.
func (p *TCompactProtocol) ReadStructBegin() (name string, err error) {
p.lastField = append(p.lastField, p.lastFieldId)
p.lastFieldId = 0
return
}
// Doesn't actually consume any wire data, just removes the last field for
// this struct from the field stack.
func (p *TCompactProtocol) ReadStructEnd() error {
// consume the last field we read off the wire.
p.lastFieldId = p.lastField[len(p.lastField)-1]
p.lastField = p.lastField[:len(p.lastField)-1]
return nil
}
// Read a field header off the wire.
func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
t, err := p.readByteDirect()
if err != nil {
return
}
// if it's a stop, then we can return immediately, as the struct is over.
if (t & 0x0f) == STOP {
return "", STOP, 0, nil
}
// mask off the 4 MSB of the type header. it could contain a field id delta.
modifier := int16((t & 0xf0) >> 4)
if modifier == 0 {
// not a delta. look ahead for the zigzag varint field id.
id, err = p.ReadI16()
if err != nil {
return
}
} else {
// has a delta. add the delta to the last read field id.
id = int16(p.lastFieldId) + modifier
}
typeId, e := p.getTType(tCompactType(t & 0x0f))
if e != nil {
err = NewTProtocolException(e)
return
}
// if this happens to be a boolean field, the value is encoded in the type
if p.isBoolType(t) {
// save the boolean value in a special instance variable.
p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE)
p.boolValueIsNotNull = true
}
// push the new field onto the field stack so we can keep the deltas going.
p.lastFieldId = int(id)
return
}
func (p *TCompactProtocol) ReadFieldEnd() error { return nil }
// Read a map header off the wire. If the size is zero, skip reading the key
// and value type. This means that 0-length maps will yield TMaps without the
// "correct" types.
func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
size32, e := p.readVarint32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size32 < 0 {
err = invalidDataLength
return
}
size = int(size32)
keyAndValueType := byte(STOP)
if size != 0 {
keyAndValueType, err = p.readByteDirect()
if err != nil {
return
}
}
keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4))
valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf))
return
}
func (p *TCompactProtocol) ReadMapEnd() error { return nil }
// Read a list header off the wire. If the list size is 0-14, the size will
// be packed into the element type header. If it's a longer list, the 4 MSB
// of the element type header will be 0xF, and a varint will follow with the
// true size.
func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) {
size_and_type, err := p.readByteDirect()
if err != nil {
return
}
size = int((size_and_type >> 4) & 0x0f)
if size == 15 {
size2, e := p.readVarint32()
if e != nil {
err = NewTProtocolException(e)
return
}
if size2 < 0 {
err = invalidDataLength
return
}
size = int(size2)
}
elemType, e := p.getTType(tCompactType(size_and_type))
if e != nil {
err = NewTProtocolException(e)
return
}
return
}
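// For illustration of the packed header above: a 3-element list of i64
// (COMPACT_I64 = 0x06) arrives as the single byte 0x36, while a 20-element
// list arrives as 0xF6 followed by the varint 0x14.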
func (p *TCompactProtocol) ReadListEnd() error { return nil }
// Read a set header off the wire. If the set size is 0-14, the size will
// be packed into the element type header. If it's a longer set, the 4 MSB
// of the element type header will be 0xF, and a varint will follow with the
// true size.
func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) {
return p.ReadListBegin()
}
func (p *TCompactProtocol) ReadSetEnd() error { return nil }
// Read a boolean off the wire. If this is a boolean field, the value should
// already have been read during readFieldBegin, so we'll just consume the
// pre-stored value. Otherwise, read a byte.
func (p *TCompactProtocol) ReadBool() (value bool, err error) {
if p.boolValueIsNotNull {
p.boolValueIsNotNull = false
return p.boolValue, nil
}
v, err := p.readByteDirect()
return v == COMPACT_BOOLEAN_TRUE, err
}
// Read a single byte off the wire. Nothing interesting here.
func (p *TCompactProtocol) ReadByte() (int8, error) {
v, err := p.readByteDirect()
if err != nil {
return 0, NewTProtocolException(err)
}
return int8(v), err
}
// Read an i16 from the wire as a zigzag varint.
func (p *TCompactProtocol) ReadI16() (value int16, err error) {
v, err := p.ReadI32()
return int16(v), err
}
// Read an i32 from the wire as a zigzag varint.
func (p *TCompactProtocol) ReadI32() (value int32, err error) {
v, e := p.readVarint32()
if e != nil {
return 0, NewTProtocolException(e)
}
value = p.zigzagToInt32(v)
return value, nil
}
// Read an i64 from the wire as a zigzag varint.
func (p *TCompactProtocol) ReadI64() (value int64, err error) {
v, e := p.readVarint64()
if e != nil {
return 0, NewTProtocolException(e)
}
value = p.zigzagToInt64(v)
return value, nil
}
// No magic here - just read a double off the wire.
func (p *TCompactProtocol) ReadDouble() (value float64, err error) {
longBits := p.buffer[0:8]
_, e := io.ReadFull(p.trans, longBits)
if e != nil {
return 0.0, NewTProtocolException(e)
}
return math.Float64frombits(p.bytesToUint64(longBits)), nil
}
// Reads a []byte (via readBinary), and then UTF-8 decodes it.
func (p *TCompactProtocol) ReadString() (value string, err error) {
length, e := p.readVarint32()
if e != nil {
return "", NewTProtocolException(e)
}
if length < 0 {
return "", invalidDataLength
}
if uint64(length) > p.trans.RemainingBytes() {
return "", invalidDataLength
}
if length == 0 {
return "", nil
}
var buf []byte
if length <= int32(len(p.buffer)) {
buf = p.buffer[0:length]
} else {
buf = make([]byte, length)
}
_, e = io.ReadFull(p.trans, buf)
return string(buf), NewTProtocolException(e)
}
// Read a []byte from the wire.
func (p *TCompactProtocol) ReadBinary() (value []byte, err error) {
length, e := p.readVarint32()
if e != nil {
return nil, NewTProtocolException(e)
}
if length == 0 {
return []byte{}, nil
}
if length < 0 {
return nil, invalidDataLength
}
if uint64(length) > p.trans.RemainingBytes() {
return nil, invalidDataLength
}
buf := make([]byte, length)
_, e = io.ReadFull(p.trans, buf)
return buf, NewTProtocolException(e)
}
func (p *TCompactProtocol) Flush() (err error) {
return NewTProtocolException(p.trans.Flush())
}
func (p *TCompactProtocol) Skip(fieldType TType) (err error) {
return SkipDefaultDepth(p, fieldType)
}
func (p *TCompactProtocol) Transport() TTransport {
return p.origTransport
}
//
// Internal writing methods
//
// Abstract method for writing the start of lists and sets. List and sets on
// the wire differ only by the type indicator.
func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) {
if size <= 14 {
return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType))))
}
err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType)))
if err != nil {
return 0, err
}
m, err := p.writeVarint32(int32(size))
return 1 + m, err
}
// Write an i32 as a varint. Results in 1-5 bytes on the wire.
// TODO(pomack): make a permanent buffer like writeVarint64?
func (p *TCompactProtocol) writeVarint32(n int32) (int, error) {
i32buf := p.buffer[0:5]
idx := 0
	for {
		if (n & ^0x7F) == 0 {
			i32buf[idx] = byte(n)
			idx++
			break
		}
		i32buf[idx] = byte((n & 0x7F) | 0x80)
		idx++
		u := uint32(n)
		n = int32(u >> 7)
	}
return p.trans.Write(i32buf[0:idx])
}
// Write an i64 as a varint. Results in 1-10 bytes on the wire.
func (p *TCompactProtocol) writeVarint64(n int64) (int, error) {
varint64out := p.buffer[0:10]
idx := 0
for {
if (n & ^0x7F) == 0 {
varint64out[idx] = byte(n)
idx++
break
} else {
varint64out[idx] = byte((n & 0x7F) | 0x80)
idx++
u := uint64(n)
n = int64(u >> 7)
}
}
return p.trans.Write(varint64out[0:idx])
}
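// For illustration: writeVarint32(300) emits 0xAC 0x02. 300 is binary
// 100101100; the low 7-bit group 0101100 goes first with its continuation
// bit set (0xAC), then the remaining group 10 (0x02) with the bit clear.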
// Convert l into a zigzag long. This allows negative numbers to be
// represented compactly as a varint.
func (p *TCompactProtocol) int64ToZigzag(l int64) int64 {
return (l << 1) ^ (l >> 63)
}
// Convert l into a zigzag long. This allows negative numbers to be
// represented compactly as a varint.
func (p *TCompactProtocol) int32ToZigzag(n int32) int32 {
return (n << 1) ^ (n >> 31)
}
func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) {
binary.LittleEndian.PutUint64(buf, n)
}
func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) {
binary.LittleEndian.PutUint64(buf, uint64(n))
}
// Writes a byte without any possibility of all that field header nonsense.
// Used internally by other writing methods that know they need to write a byte.
func (p *TCompactProtocol) writeByteDirect(b byte) error {
return p.trans.WriteByte(b)
}
// Like writeByteDirect, but also reports the single byte written.
func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) {
return 1, p.writeByteDirect(byte(n))
}
//
// Internal reading methods
//
// Read an i32 from the wire as a varint. The MSB of each byte is set
// if there is another byte to follow. This can read up to 5 bytes.
func (p *TCompactProtocol) readVarint32() (int32, error) {
// if the wire contains the right stuff, this will just truncate the i64 we
// read and get us the right sign.
v, err := p.readVarint64()
return int32(v), err
}
// Read an i64 from the wire as a proper varint. The MSB of each byte is set
// if there is another byte to follow. This can read up to 10 bytes.
func (p *TCompactProtocol) readVarint64() (int64, error) {
shift := uint(0)
result := int64(0)
for {
b, err := p.readByteDirect()
if err != nil {
return 0, err
}
result |= int64(b&0x7f) << shift
if (b & 0x80) != 0x80 {
break
}
shift += 7
}
return result, nil
}
// readByteDirect reads a raw transport byte, unlike ReadByte, which returns a Thrift byte (i8).
func (p *TCompactProtocol) readByteDirect() (byte, error) {
return p.trans.ReadByte()
}
//
// encoding helpers
//
// Convert from zigzag int to int.
func (p *TCompactProtocol) zigzagToInt32(n int32) int32 {
u := uint32(n)
return int32(u>>1) ^ -(n & 1)
}
// Convert from zigzag long to long.
func (p *TCompactProtocol) zigzagToInt64(n int64) int64 {
u := uint64(n)
return int64(u>>1) ^ -(n & 1)
}
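// For illustration, zigzag interleaves signs so small magnitudes stay small
// on the wire:
//
//	int32ToZigzag: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, 2 -> 4
//	zigzagToInt32: 5 -> -3, and zigzagToInt32(int32ToZigzag(n)) == n for all n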
// Note that it's important that the mask bytes are long literals,
// otherwise they'll default to ints, and when you shift an int left 56 bits,
// you just get a messed up int.
func (p *TCompactProtocol) bytesToInt64(b []byte) int64 {
return int64(binary.LittleEndian.Uint64(b))
}
// Note that it's important that the mask bytes are long literals,
// otherwise they'll default to ints, and when you shift an int left 56 bits,
// you just get a messed up int.
func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 {
return binary.LittleEndian.Uint64(b)
}
//
// type testing and converting
//
func (p *TCompactProtocol) isBoolType(b byte) bool {
return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE
}
// Given a tCompactType constant, convert it to its corresponding
// TType value.
func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) {
switch byte(t) & 0x0f {
case STOP:
return STOP, nil
case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE:
return BOOL, nil
case COMPACT_BYTE:
return BYTE, nil
case COMPACT_I16:
return I16, nil
case COMPACT_I32:
return I32, nil
case COMPACT_I64:
return I64, nil
case COMPACT_DOUBLE:
return DOUBLE, nil
case COMPACT_BINARY:
return STRING, nil
case COMPACT_LIST:
return LIST, nil
case COMPACT_SET:
return SET, nil
case COMPACT_MAP:
return MAP, nil
case COMPACT_STRUCT:
return STRUCT, nil
}
	return STOP, TException(fmt.Errorf("don't know what type: %d", t&0x0f))
}
// Given a TType value, find the appropriate TCompactProtocol.Types constant.
func (p *TCompactProtocol) getCompactType(t TType) tCompactType {
return ttypeToCompactType[t]
}

View File

@ -0,0 +1,269 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"log"
)
type TDebugProtocol struct {
Delegate TProtocol
LogPrefix string
}
type TDebugProtocolFactory struct {
Underlying TProtocolFactory
LogPrefix string
}
func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory {
return &TDebugProtocolFactory{
Underlying: underlying,
LogPrefix: logPrefix,
}
}
func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return &TDebugProtocol{
Delegate: t.Underlying.GetProtocol(trans),
LogPrefix: t.LogPrefix,
}
}
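// For illustration, tracing every protocol call made over a binary protocol
// (a sketch; trans is assumed to be an open TTransport):
//
//	factory := NewTDebugProtocolFactory(NewTBinaryProtocolFactoryDefault(), "client: ")
//	proto := factory.GetProtocol(trans)
//	// every Write*/Read* on proto now logs a line via the standard logger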
func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid)
log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err)
return err
}
func (tdp *TDebugProtocol) WriteMessageEnd() error {
err := tdp.Delegate.WriteMessageEnd()
log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteStructBegin(name string) error {
err := tdp.Delegate.WriteStructBegin(name)
log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err)
return err
}
func (tdp *TDebugProtocol) WriteStructEnd() error {
err := tdp.Delegate.WriteStructEnd()
log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
err := tdp.Delegate.WriteFieldBegin(name, typeId, id)
log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err)
return err
}
func (tdp *TDebugProtocol) WriteFieldEnd() error {
err := tdp.Delegate.WriteFieldEnd()
log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteFieldStop() error {
err := tdp.Delegate.WriteFieldStop()
log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
err := tdp.Delegate.WriteMapBegin(keyType, valueType, size)
log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err)
return err
}
func (tdp *TDebugProtocol) WriteMapEnd() error {
err := tdp.Delegate.WriteMapEnd()
log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error {
err := tdp.Delegate.WriteListBegin(elemType, size)
log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
return err
}
func (tdp *TDebugProtocol) WriteListEnd() error {
err := tdp.Delegate.WriteListEnd()
log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error {
err := tdp.Delegate.WriteSetBegin(elemType, size)
log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err)
return err
}
func (tdp *TDebugProtocol) WriteSetEnd() error {
err := tdp.Delegate.WriteSetEnd()
log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err)
return err
}
func (tdp *TDebugProtocol) WriteBool(value bool) error {
err := tdp.Delegate.WriteBool(value)
log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteByte(value int8) error {
err := tdp.Delegate.WriteByte(value)
log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteI16(value int16) error {
err := tdp.Delegate.WriteI16(value)
log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteI32(value int32) error {
err := tdp.Delegate.WriteI32(value)
log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteI64(value int64) error {
err := tdp.Delegate.WriteI64(value)
log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteDouble(value float64) error {
err := tdp.Delegate.WriteDouble(value)
log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteString(value string) error {
err := tdp.Delegate.WriteString(value)
log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) WriteBinary(value []byte) error {
err := tdp.Delegate.WriteBinary(value)
log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err)
return err
}
func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin()
log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err)
return
}
func (tdp *TDebugProtocol) ReadMessageEnd() (err error) {
err = tdp.Delegate.ReadMessageEnd()
log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) {
name, err = tdp.Delegate.ReadStructBegin()
log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err)
return
}
func (tdp *TDebugProtocol) ReadStructEnd() (err error) {
err = tdp.Delegate.ReadStructEnd()
log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) {
name, typeId, id, err = tdp.Delegate.ReadFieldBegin()
log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err)
return
}
func (tdp *TDebugProtocol) ReadFieldEnd() (err error) {
err = tdp.Delegate.ReadFieldEnd()
log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) {
keyType, valueType, size, err = tdp.Delegate.ReadMapBegin()
log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err)
return
}
func (tdp *TDebugProtocol) ReadMapEnd() (err error) {
err = tdp.Delegate.ReadMapEnd()
log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) {
elemType, size, err = tdp.Delegate.ReadListBegin()
log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
return
}
func (tdp *TDebugProtocol) ReadListEnd() (err error) {
err = tdp.Delegate.ReadListEnd()
log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) {
elemType, size, err = tdp.Delegate.ReadSetBegin()
log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err)
return
}
func (tdp *TDebugProtocol) ReadSetEnd() (err error) {
err = tdp.Delegate.ReadSetEnd()
log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) ReadBool() (value bool, err error) {
value, err = tdp.Delegate.ReadBool()
log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadByte() (value int8, err error) {
value, err = tdp.Delegate.ReadByte()
log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadI16() (value int16, err error) {
value, err = tdp.Delegate.ReadI16()
log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadI32() (value int32, err error) {
value, err = tdp.Delegate.ReadI32()
log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadI64() (value int64, err error) {
value, err = tdp.Delegate.ReadI64()
log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) {
value, err = tdp.Delegate.ReadDouble()
log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadString() (value string, err error) {
value, err = tdp.Delegate.ReadString()
log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) {
value, err = tdp.Delegate.ReadBinary()
log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err)
return
}
func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) {
err = tdp.Delegate.Skip(fieldType)
log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err)
return
}
func (tdp *TDebugProtocol) Flush() (err error) {
err = tdp.Delegate.Flush()
log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err)
return
}
func (tdp *TDebugProtocol) Transport() TTransport {
return tdp.Delegate.Transport()
}

View File

@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
type TDeserializer struct {
Transport TTransport
Protocol TProtocol
}
func NewTDeserializer() *TDeserializer {
var transport TTransport
transport = NewTMemoryBufferLen(1024)
protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport)
return &TDeserializer{
transport,
protocol}
}
func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) {
if _, err = t.Transport.Write([]byte(s)); err != nil {
return
}
if err = msg.Read(t.Protocol); err != nil {
return
}
return
}
func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) {
if _, err = t.Transport.Write(b); err != nil {
return
}
if err = msg.Read(t.Protocol); err != nil {
return
}
return
}
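// For illustration, decoding a serialized struct from a byte slice (a sketch;
// MyStruct is a hypothetical generated type implementing TStruct):
//
//	d := NewTDeserializer()
//	var msg MyStruct
//	if err := d.Read(&msg, payload); err != nil {
//		// handle decode error
//	}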

View File

@ -0,0 +1,44 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"errors"
)
// Generic Thrift exception
type TException interface {
error
}
// Prepends additional information to an error without losing the Thrift exception interface
func PrependError(prepend string, err error) error {
if t, ok := err.(TTransportException); ok {
return NewTTransportException(t.TypeId(), prepend+t.Error())
}
if t, ok := err.(TProtocolException); ok {
return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error()))
}
if t, ok := err.(TApplicationException); ok {
return NewTApplicationException(t.TypeId(), prepend+t.Error())
}
return errors.New(prepend + err.Error())
}
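// For illustration, annotating a decode failure with its call site while
// preserving the concrete exception type:
//
//	if err := msg.Read(iprot); err != nil {
//		return PrependError("reading ping reply: ", err)
//	}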

79
vendor/github.com/apache/thrift/lib/go/thrift/field.go generated vendored Normal file
View File

@ -0,0 +1,79 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "strconv"
// Helper type that encapsulates field metadata.
type field struct {
name string
typeId TType
id int
}
func newField(n string, t TType, i int) *field {
return &field{name: n, typeId: t, id: i}
}
func (p *field) Name() string {
if p == nil {
return ""
}
return p.name
}
func (p *field) TypeId() TType {
if p == nil {
return TType(VOID)
}
return p.typeId
}
func (p *field) Id() int {
if p == nil {
return -1
}
return p.id
}
func (p *field) String() string {
if p == nil {
return "<nil>"
}
return "<TField name:'" + p.name + "' type:" + string(p.typeId) + " field-id:" + string(p.id) + ">"
}
var ANONYMOUS_FIELD *field
type fieldSlice []field
func (p fieldSlice) Len() int {
return len(p)
}
func (p fieldSlice) Less(i, j int) bool {
return p[i].Id() < p[j].Id()
}
func (p fieldSlice) Swap(i, j int) {
p[i], p[j] = p[j], p[i]
}
func init() {
ANONYMOUS_FIELD = newField("", STOP, 0)
}

View File

@ -0,0 +1,172 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
"bytes"
"encoding/binary"
"fmt"
"io"
)
const DEFAULT_MAX_LENGTH = 16384000
type TFramedTransport struct {
transport TTransport
buf bytes.Buffer
reader *bufio.Reader
	frameSize uint32 // Current remaining size of the frame; if 0, read the next frame header
buffer [4]byte
maxLength uint32
}
type tFramedTransportFactory struct {
factory TTransportFactory
maxLength uint32
}
func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory {
return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH}
}
func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory {
return &tFramedTransportFactory{factory: factory, maxLength: maxLength}
}
func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) {
tt, err := p.factory.GetTransport(base)
if err != nil {
return nil, err
}
return NewTFramedTransportMaxLength(tt, p.maxLength), nil
}
func NewTFramedTransport(transport TTransport) *TFramedTransport {
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH}
}
func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport {
return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: maxLength}
}
func (p *TFramedTransport) Open() error {
return p.transport.Open()
}
func (p *TFramedTransport) IsOpen() bool {
return p.transport.IsOpen()
}
func (p *TFramedTransport) Close() error {
return p.transport.Close()
}
func (p *TFramedTransport) Read(buf []byte) (l int, err error) {
if p.frameSize == 0 {
p.frameSize, err = p.readFrameHeader()
if err != nil {
return
}
}
if p.frameSize < uint32(len(buf)) {
frameSize := p.frameSize
tmp := make([]byte, p.frameSize)
l, err = p.Read(tmp)
copy(buf, tmp)
if err == nil {
err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf)))
return
}
}
	got, err := p.reader.Read(buf)
	// sanity check: frameSize is unsigned, so guard against underflow rather
	// than testing for a negative value (which can never occur)
	if uint32(got) > p.frameSize {
		return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size")
	}
	p.frameSize = p.frameSize - uint32(got)
return got, NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) ReadByte() (c byte, err error) {
if p.frameSize == 0 {
p.frameSize, err = p.readFrameHeader()
if err != nil {
return
}
}
if p.frameSize < 1 {
return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1))
}
c, err = p.reader.ReadByte()
if err == nil {
p.frameSize--
}
return
}
func (p *TFramedTransport) Write(buf []byte) (int, error) {
n, err := p.buf.Write(buf)
return n, NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) WriteByte(c byte) error {
return p.buf.WriteByte(c)
}
func (p *TFramedTransport) WriteString(s string) (n int, err error) {
return p.buf.WriteString(s)
}
func (p *TFramedTransport) Flush() error {
size := p.buf.Len()
buf := p.buffer[:4]
binary.BigEndian.PutUint32(buf, uint32(size))
_, err := p.transport.Write(buf)
if err != nil {
p.buf.Truncate(0)
return NewTTransportExceptionFromError(err)
}
if size > 0 {
if n, err := p.buf.WriteTo(p.transport); err != nil {
print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n")
p.buf.Truncate(0)
return NewTTransportExceptionFromError(err)
}
}
err = p.transport.Flush()
return NewTTransportExceptionFromError(err)
}
func (p *TFramedTransport) readFrameHeader() (uint32, error) {
buf := p.buffer[:4]
if _, err := io.ReadFull(p.reader, buf); err != nil {
return 0, err
}
size := binary.BigEndian.Uint32(buf)
// size is unsigned and can never be negative; only the upper bound needs checking
if size > p.maxLength {
return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size))
}
return size, nil
}
func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) {
return uint64(p.frameSize)
}
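A minimal round-trip sketch for the framed transport, assuming the vendored import path and the TMemoryBuffer transport defined later in this diff: Flush prepends the 4-byte big-endian length header, and a second framed transport over the same buffer parses it back.

package main

import (
	"fmt"
	"io"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	mem := thrift.NewTMemoryBuffer()

	// Writes buffer in memory until Flush emits the length header plus payload.
	w := thrift.NewTFramedTransport(mem)
	if _, err := w.Write([]byte("hello")); err != nil {
		panic(err)
	}
	if err := w.Flush(); err != nil {
		panic(err)
	}

	// A fresh framed transport reads the frame header before the payload.
	r := thrift.NewTFramedTransport(mem)
	buf := make([]byte, 5)
	if _, err := io.ReadFull(r, buf); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // hello
}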

26
vendor/github.com/apache/thrift/lib/go/thrift/go17.go generated vendored Normal file
View File

@ -0,0 +1,26 @@
// +build go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "context"
var defaultCtx = context.Background()

View File

@ -0,0 +1,238 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bytes"
"io"
"io/ioutil"
"net/http"
"net/url"
"strconv"
)
// Default to using the shared http client. Library users are
// free to change this global client or specify one through
// THttpClientOptions.
var DefaultHttpClient *http.Client = http.DefaultClient
type THttpClient struct {
client *http.Client
response *http.Response
url *url.URL
requestBuffer *bytes.Buffer
header http.Header
nsecConnectTimeout int64
nsecReadTimeout int64
}
type THttpClientTransportFactory struct {
options THttpClientOptions
url string
}
func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
if trans != nil {
t, ok := trans.(*THttpClient)
if ok && t.url != nil {
return NewTHttpClientWithOptions(t.url.String(), p.options)
}
}
return NewTHttpClientWithOptions(p.url, p.options)
}
type THttpClientOptions struct {
// If nil, DefaultHttpClient is used
Client *http.Client
}
func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory {
return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
}
func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
return &THttpClientTransportFactory{url: url, options: options}
}
func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
parsedURL, err := url.Parse(urlstr)
if err != nil {
return nil, err
}
buf := make([]byte, 0, 1024)
client := options.Client
if client == nil {
client = DefaultHttpClient
}
httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}}
return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil
}
func NewTHttpClient(urlstr string) (TTransport, error) {
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
}
// Set the HTTP Header for this specific Thrift Transport
// It is important that you first assert the TTransport as a *THttpClient type
// like so:
//
// httpTrans := trans.(*THttpClient)
// httpTrans.SetHeader("User-Agent", "Thrift Client 1.0")
func (p *THttpClient) SetHeader(key string, value string) {
p.header.Add(key, value)
}
// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport
// It is important that you first assert the TTransport as a *THttpClient type
// like so:
//
// httpTrans := trans.(*THttpClient)
// hdrValue := httpTrans.GetHeader("User-Agent")
func (p *THttpClient) GetHeader(key string) string {
return p.header.Get(key)
}
// Deletes the HTTP Header given a Header Key for this specific Thrift Transport
// It is important that you first assert the TTransport as a *THttpClient type
// like so:
//
// httpTrans := trans.(*THttpClient)
// httpTrans.DelHeader("User-Agent")
func (p *THttpClient) DelHeader(key string) {
p.header.Del(key)
}
func (p *THttpClient) Open() error {
// do nothing
return nil
}
func (p *THttpClient) IsOpen() bool {
return p.response != nil || p.requestBuffer != nil
}
func (p *THttpClient) closeResponse() error {
var err error
if p.response != nil && p.response.Body != nil {
// The docs specify that if keepalive is enabled and the response body is not
// read to completion the connection will never be returned to the pool and
// reused. Errors are being ignored here because if the connection is invalid
// and this fails for some reason, the Close() method will do any remaining
// cleanup.
io.Copy(ioutil.Discard, p.response.Body)
err = p.response.Body.Close()
}
p.response = nil
return err
}
func (p *THttpClient) Close() error {
if p.requestBuffer != nil {
p.requestBuffer.Reset()
p.requestBuffer = nil
}
return p.closeResponse()
}
func (p *THttpClient) Read(buf []byte) (int, error) {
if p.response == nil {
return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
}
n, err := p.response.Body.Read(buf)
if n > 0 && (err == nil || err == io.EOF) {
return n, nil
}
return n, NewTTransportExceptionFromError(err)
}
func (p *THttpClient) ReadByte() (c byte, err error) {
// Guard against a nil response, mirroring the check in Read above.
if p.response == nil {
return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.")
}
return readByte(p.response.Body)
}
func (p *THttpClient) Write(buf []byte) (int, error) {
n, err := p.requestBuffer.Write(buf)
return n, err
}
func (p *THttpClient) WriteByte(c byte) error {
return p.requestBuffer.WriteByte(c)
}
func (p *THttpClient) WriteString(s string) (n int, err error) {
return p.requestBuffer.WriteString(s)
}
func (p *THttpClient) Flush() error {
// Close any previous response body to avoid leaking connections.
p.closeResponse()
req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer)
if err != nil {
return NewTTransportExceptionFromError(err)
}
req.Header = p.header
response, err := p.client.Do(req)
if err != nil {
return NewTTransportExceptionFromError(err)
}
if response.StatusCode != http.StatusOK {
// Close the response to avoid leaking file descriptors. closeResponse does
// more than just call Close(), so temporarily assign it and reuse the logic.
p.response = response
p.closeResponse()
// TODO(pomack) log bad response
return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode))
}
p.response = response
return nil
}
func (p *THttpClient) RemainingBytes() (num_bytes uint64) {
length := p.response.ContentLength
if length >= 0 {
return uint64(length)
}
const maxSize = ^uint64(0)
return maxSize // the truth is, we just don't know unless framed is used
}
// Deprecated: Use NewTHttpClientTransportFactory instead.
func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory {
return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{})
}
// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead.
func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory {
return NewTHttpClientTransportFactoryWithOptions(url, options)
}
// Deprecated: Use NewTHttpClientWithOptions instead.
func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) {
return NewTHttpClientWithOptions(urlstr, options)
}
// Deprecated: Use NewTHttpClient instead.
func NewTHttpPostClient(urlstr string) (TTransport, error) {
return NewTHttpClientWithOptions(urlstr, THttpClientOptions{})
}
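A construction sketch for this HTTP client transport, assuming the vendored import path and a placeholder endpoint URL; it shows THttpClientOptions overriding the shared DefaultHttpClient and the concrete-type assertion the header helpers above expect.

package main

import (
	"net/http"
	"time"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	opts := thrift.THttpClientOptions{
		// Use a dedicated client with a timeout instead of DefaultHttpClient.
		Client: &http.Client{Timeout: 10 * time.Second},
	}
	trans, err := thrift.NewTHttpClientWithOptions("http://localhost:9090/thrift", opts)
	if err != nil {
		panic(err)
	}
	// SetHeader lives on the concrete *THttpClient type.
	trans.(*thrift.THttpClient).SetHeader("User-Agent", "example-client/0.1")
	// Writes accumulate in the request buffer; Flush performs the HTTP POST.
}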

View File

@ -0,0 +1,51 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"compress/gzip"
"io"
"net/http"
"strings"
)
// gz transparently compresses the HTTP response if the client supports it.
func gz(handler http.HandlerFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") {
handler(w, r)
return
}
w.Header().Set("Content-Encoding", "gzip")
gz := gzip.NewWriter(w)
defer gz.Close()
gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w}
handler(gzw, r)
}
}
type gzipResponseWriter struct {
io.Writer
http.ResponseWriter
}
func (w gzipResponseWriter) Write(b []byte) (int, error) {
return w.Writer.Write(b)
}

View File

@ -0,0 +1,38 @@
// +build go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"net/http"
)
// NewThriftHandlerFunc creates a ready-to-use Apache Thrift handler function.
func NewThriftHandlerFunc(processor TProcessor,
inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
return gz(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/x-thrift")
transport := NewStreamTransport(r.Body, w)
processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
})
}
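A server wiring sketch, assuming the vendored import path. An empty TMultiplexedProcessor (defined later in this diff) stands in for a generated service processor, and NewTBinaryProtocolFactoryDefault is assumed from the binary protocol file elsewhere in this vendor tree.

package main

import (
	"log"
	"net/http"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	// Real code would pass a generated processor, e.g. calculator.NewCalculatorProcessor(handler).
	processor := thrift.NewTMultiplexedProcessor()
	pf := thrift.NewTBinaryProtocolFactoryDefault()
	// NewThriftHandlerFunc wraps the processor in the gzip-aware handler above.
	http.HandleFunc("/thrift", thrift.NewThriftHandlerFunc(processor, pf, pf))
	log.Fatal(http.ListenAndServe(":9090", nil))
}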

View File

@ -0,0 +1,40 @@
// +build !go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"net/http"
"golang.org/x/net/context"
)
// NewThriftHandlerFunc creates a ready-to-use Apache Thrift handler function.
func NewThriftHandlerFunc(processor TProcessor,
inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) {
return gz(func(w http.ResponseWriter, r *http.Request) {
w.Header().Add("Content-Type", "application/x-thrift")
transport := NewStreamTransport(r.Body, w)
processor.Process(context.Background(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport))
})
}

View File

@ -0,0 +1,213 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bufio"
"io"
)
// StreamTransport is a Transport made of an io.Reader and/or an io.Writer
type StreamTransport struct {
io.Reader
io.Writer
isReadWriter bool
closed bool
}
type StreamTransportFactory struct {
Reader io.Reader
Writer io.Writer
isReadWriter bool
}
func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
if trans != nil {
t, ok := trans.(*StreamTransport)
if ok {
if t.isReadWriter {
return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil
}
if t.Reader != nil && t.Writer != nil {
return NewStreamTransport(t.Reader, t.Writer), nil
}
if t.Reader != nil && t.Writer == nil {
return NewStreamTransportR(t.Reader), nil
}
if t.Reader == nil && t.Writer != nil {
return NewStreamTransportW(t.Writer), nil
}
return &StreamTransport{}, nil
}
}
if p.isReadWriter {
return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil
}
if p.Reader != nil && p.Writer != nil {
return NewStreamTransport(p.Reader, p.Writer), nil
}
if p.Reader != nil && p.Writer == nil {
return NewStreamTransportR(p.Reader), nil
}
if p.Reader == nil && p.Writer != nil {
return NewStreamTransportW(p.Writer), nil
}
return &StreamTransport{}, nil
}
func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory {
return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter}
}
func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport {
return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)}
}
func NewStreamTransportR(r io.Reader) *StreamTransport {
return &StreamTransport{Reader: bufio.NewReader(r)}
}
func NewStreamTransportW(w io.Writer) *StreamTransport {
return &StreamTransport{Writer: bufio.NewWriter(w)}
}
func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport {
bufrw := bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw))
return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true}
}
func (p *StreamTransport) IsOpen() bool {
return !p.closed
}
// implicitly opened on creation, can't be reopened once closed
func (p *StreamTransport) Open() error {
if !p.closed {
return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.")
} else {
return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.")
}
}
// Closes both the input and output streams.
func (p *StreamTransport) Close() error {
if p.closed {
return NewTTransportException(NOT_OPEN, "StreamTransport already closed.")
}
p.closed = true
closedReader := false
if p.Reader != nil {
c, ok := p.Reader.(io.Closer)
if ok {
e := c.Close()
closedReader = true
if e != nil {
return e
}
}
p.Reader = nil
}
if p.Writer != nil && (!closedReader || !p.isReadWriter) {
c, ok := p.Writer.(io.Closer)
if ok {
e := c.Close()
if e != nil {
return e
}
}
p.Writer = nil
}
return nil
}
// Flushes the underlying output stream if not nil.
func (p *StreamTransport) Flush() error {
if p.Writer == nil {
return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream")
}
f, ok := p.Writer.(Flusher)
if ok {
err := f.Flush()
if err != nil {
return NewTTransportExceptionFromError(err)
}
}
return nil
}
func (p *StreamTransport) Read(c []byte) (n int, err error) {
n, err = p.Reader.Read(c)
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) ReadByte() (c byte, err error) {
f, ok := p.Reader.(io.ByteReader)
if ok {
c, err = f.ReadByte()
} else {
c, err = readByte(p.Reader)
}
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) Write(c []byte) (n int, err error) {
n, err = p.Writer.Write(c)
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) WriteByte(c byte) (err error) {
f, ok := p.Writer.(io.ByteWriter)
if ok {
err = f.WriteByte(c)
} else {
err = writeByte(p.Writer, c)
}
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) WriteString(s string) (n int, err error) {
f, ok := p.Writer.(stringWriter)
if ok {
n, err = f.WriteString(s)
} else {
n, err = p.Writer.Write([]byte(s))
}
if err != nil {
err = NewTTransportExceptionFromError(err)
}
return
}
func (p *StreamTransport) RemainingBytes() (num_bytes uint64) {
const maxSize = ^uint64(0)
return maxSize // the truth is, we just don't know unless framed is used
}
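A small sketch of the read/write stream transport over an in-memory buffer, assuming the vendored import path; Flush drains the buffered writer into the underlying buffer before the read side consumes it.

package main

import (
	"bytes"
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	var rw bytes.Buffer
	st := thrift.NewStreamTransportRW(&rw)
	if _, err := st.WriteString("ping"); err != nil {
		panic(err)
	}
	// Flush the bufio writer so the bytes land in rw before reading.
	if err := st.Flush(); err != nil {
		panic(err)
	}
	buf := make([]byte, 4)
	if _, err := st.Read(buf); err != nil {
		panic(err)
	}
	fmt.Println(string(buf)) // ping
}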

View File

@ -0,0 +1,583 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"encoding/base64"
"fmt"
)
const (
THRIFT_JSON_PROTOCOL_VERSION = 1
)
// for references to _ParseContext see tsimplejson_protocol.go
// JSON protocol implementation for thrift.
//
// This is the full-featured TJSONProtocol: it encodes type and length
// metadata alongside values. It should not be confused with the simpler,
// scripting-friendly TSimpleJSONProtocol that it builds on.
//
type TJSONProtocol struct {
*TSimpleJSONProtocol
}
// Constructor
func NewTJSONProtocol(t TTransport) *TJSONProtocol {
v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)}
v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
return v
}
// Factory
type TJSONProtocolFactory struct{}
func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
return NewTJSONProtocol(trans)
}
func NewTJSONProtocolFactory() *TJSONProtocolFactory {
return &TJSONProtocolFactory{}
}
func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
p.resetContextStack() // THRIFT-3735
if e := p.OutputListBegin(); e != nil {
return e
}
if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil {
return e
}
if e := p.WriteString(name); e != nil {
return e
}
if e := p.WriteByte(int8(typeId)); e != nil {
return e
}
if e := p.WriteI32(seqId); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) WriteMessageEnd() error {
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteStructBegin(name string) error {
if e := p.OutputObjectBegin(); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) WriteStructEnd() error {
return p.OutputObjectEnd()
}
func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
if e := p.WriteI16(id); e != nil {
return e
}
if e := p.OutputObjectBegin(); e != nil {
return e
}
s, e1 := p.TypeIdToString(typeId)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) WriteFieldEnd() error {
return p.OutputObjectEnd()
}
func (p *TJSONProtocol) WriteFieldStop() error { return nil }
func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
if e := p.OutputListBegin(); e != nil {
return e
}
s, e1 := p.TypeIdToString(keyType)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
return e
}
s, e1 = p.TypeIdToString(valueType)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
return e
}
if e := p.WriteI64(int64(size)); e != nil {
return e
}
return p.OutputObjectBegin()
}
func (p *TJSONProtocol) WriteMapEnd() error {
if e := p.OutputObjectEnd(); e != nil {
return e
}
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error {
return p.OutputElemListBegin(elemType, size)
}
func (p *TJSONProtocol) WriteListEnd() error {
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error {
return p.OutputElemListBegin(elemType, size)
}
func (p *TJSONProtocol) WriteSetEnd() error {
return p.OutputListEnd()
}
func (p *TJSONProtocol) WriteBool(b bool) error {
if b {
return p.WriteI32(1)
}
return p.WriteI32(0)
}
func (p *TJSONProtocol) WriteByte(b int8) error {
return p.WriteI32(int32(b))
}
func (p *TJSONProtocol) WriteI16(v int16) error {
return p.WriteI32(int32(v))
}
func (p *TJSONProtocol) WriteI32(v int32) error {
return p.OutputI64(int64(v))
}
func (p *TJSONProtocol) WriteI64(v int64) error {
return p.OutputI64(int64(v))
}
func (p *TJSONProtocol) WriteDouble(v float64) error {
return p.OutputF64(v)
}
func (p *TJSONProtocol) WriteString(v string) error {
return p.OutputString(v)
}
func (p *TJSONProtocol) WriteBinary(v []byte) error {
// The JSON library only accepts strings, not arbitrary byte arrays. To
// transmit bytes efficiently as a valid JSON string, we base64-encode
// them, which avoids excessive escaping/quoting.
if e := p.OutputPreValue(); e != nil {
return e
}
if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
return NewTProtocolException(e)
}
writer := base64.NewEncoder(base64.StdEncoding, p.writer)
if _, e := writer.Write(v); e != nil {
p.writer.Reset(p.trans) // THRIFT-3735
return NewTProtocolException(e)
}
if e := writer.Close(); e != nil {
return NewTProtocolException(e)
}
if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
return NewTProtocolException(e)
}
return p.OutputPostValue()
}
// Reading methods.
func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) {
p.resetContextStack() // THRIFT-3735
if isNull, err := p.ParseListBegin(); isNull || err != nil {
return name, typeId, seqId, err
}
version, err := p.ReadI32()
if err != nil {
return name, typeId, seqId, err
}
if version != THRIFT_JSON_PROTOCOL_VERSION {
e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION)
return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
if name, err = p.ReadString(); err != nil {
return name, typeId, seqId, err
}
bTypeId, err := p.ReadByte()
typeId = TMessageType(bTypeId)
if err != nil {
return name, typeId, seqId, err
}
if seqId, err = p.ReadI32(); err != nil {
return name, typeId, seqId, err
}
return name, typeId, seqId, nil
}
func (p *TJSONProtocol) ReadMessageEnd() error {
err := p.ParseListEnd()
return err
}
func (p *TJSONProtocol) ReadStructBegin() (name string, err error) {
_, err = p.ParseObjectStart()
return "", err
}
func (p *TJSONProtocol) ReadStructEnd() error {
return p.ParseObjectEnd()
}
func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) {
b, _ := p.reader.Peek(1)
if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] {
return "", STOP, -1, nil
}
fieldId, err := p.ReadI16()
if err != nil {
return "", STOP, fieldId, err
}
if _, err = p.ParseObjectStart(); err != nil {
return "", STOP, fieldId, err
}
sType, err := p.ReadString()
if err != nil {
return "", STOP, fieldId, err
}
fType, err := p.StringToTypeId(sType)
return "", fType, fieldId, err
}
func (p *TJSONProtocol) ReadFieldEnd() error {
return p.ParseObjectEnd()
}
func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) {
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, VOID, 0, e
}
// read keyType
sKeyType, e := p.ReadString()
if e != nil {
return keyType, valueType, size, e
}
keyType, e = p.StringToTypeId(sKeyType)
if e != nil {
return keyType, valueType, size, e
}
// read valueType
sValueType, e := p.ReadString()
if e != nil {
return keyType, valueType, size, e
}
valueType, e = p.StringToTypeId(sValueType)
if e != nil {
return keyType, valueType, size, e
}
// read size
iSize, e := p.ReadI64()
if e != nil {
return keyType, valueType, size, e
}
size = int(iSize)
_, e = p.ParseObjectStart()
return keyType, valueType, size, e
}
func (p *TJSONProtocol) ReadMapEnd() error {
e := p.ParseObjectEnd()
if e != nil {
return e
}
return p.ParseListEnd()
}
func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) {
return p.ParseElemListBegin()
}
func (p *TJSONProtocol) ReadListEnd() error {
return p.ParseListEnd()
}
func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) {
return p.ParseElemListBegin()
}
func (p *TJSONProtocol) ReadSetEnd() error {
return p.ParseListEnd()
}
func (p *TJSONProtocol) ReadBool() (bool, error) {
value, err := p.ReadI32()
return (value != 0), err
}
func (p *TJSONProtocol) ReadByte() (int8, error) {
v, err := p.ReadI64()
return int8(v), err
}
func (p *TJSONProtocol) ReadI16() (int16, error) {
v, err := p.ReadI64()
return int16(v), err
}
func (p *TJSONProtocol) ReadI32() (int32, error) {
v, err := p.ReadI64()
return int32(v), err
}
func (p *TJSONProtocol) ReadI64() (int64, error) {
v, _, err := p.ParseI64()
return v, err
}
func (p *TJSONProtocol) ReadDouble() (float64, error) {
v, _, err := p.ParseF64()
return v, err
}
func (p *TJSONProtocol) ReadString() (string, error) {
var v string
if err := p.ParsePreValue(); err != nil {
return v, err
}
f, _ := p.reader.Peek(1)
if len(f) > 0 && f[0] == JSON_QUOTE {
p.reader.ReadByte()
value, err := p.ParseStringBody()
v = value
if err != nil {
return v, err
}
} else if len(f) > 0 && f[0] == JSON_NULL[0] {
b := make([]byte, len(JSON_NULL))
_, err := p.reader.Read(b)
if err != nil {
return v, NewTProtocolException(err)
}
if string(b) != string(JSON_NULL) {
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
} else {
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
return v, p.ParsePostValue()
}
func (p *TJSONProtocol) ReadBinary() ([]byte, error) {
var v []byte
if err := p.ParsePreValue(); err != nil {
return nil, err
}
f, _ := p.reader.Peek(1)
if len(f) > 0 && f[0] == JSON_QUOTE {
p.reader.ReadByte()
value, err := p.ParseBase64EncodedBody()
v = value
if err != nil {
return v, err
}
} else if len(f) > 0 && f[0] == JSON_NULL[0] {
b := make([]byte, len(JSON_NULL))
_, err := p.reader.Read(b)
if err != nil {
return v, NewTProtocolException(err)
}
if string(b) != string(JSON_NULL) {
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b))
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
} else {
e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f))
return v, NewTProtocolExceptionWithType(INVALID_DATA, e)
}
return v, p.ParsePostValue()
}
func (p *TJSONProtocol) Flush() (err error) {
err = p.writer.Flush()
if err == nil {
err = p.trans.Flush()
}
return NewTProtocolException(err)
}
func (p *TJSONProtocol) Skip(fieldType TType) (err error) {
return SkipDefaultDepth(p, fieldType)
}
func (p *TJSONProtocol) Transport() TTransport {
return p.trans
}
func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error {
if e := p.OutputListBegin(); e != nil {
return e
}
s, e1 := p.TypeIdToString(elemType)
if e1 != nil {
return e1
}
if e := p.WriteString(s); e != nil {
return e
}
if e := p.WriteI64(int64(size)); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) {
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, 0, e
}
sElemType, err := p.ReadString()
if err != nil {
return VOID, size, err
}
elemType, err = p.StringToTypeId(sElemType)
if err != nil {
return elemType, size, err
}
nSize, err2 := p.ReadI64()
size = int(nSize)
return elemType, size, err2
}
func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) {
if isNull, e := p.ParseListBegin(); isNull || e != nil {
return VOID, 0, e
}
sElemType, err := p.ReadString()
if err != nil {
return VOID, size, err
}
elemType, err = p.StringToTypeId(sElemType)
if err != nil {
return elemType, size, err
}
nSize, err2 := p.ReadI64()
size = int(nSize)
return elemType, size, err2
}
func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error {
if e := p.OutputListBegin(); e != nil {
return e
}
s, e1 := p.TypeIdToString(elemType)
if e1 != nil {
return e1
}
if e := p.OutputString(s); e != nil {
return e
}
if e := p.OutputI64(int64(size)); e != nil {
return e
}
return nil
}
func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) {
switch byte(fieldType) {
case BOOL:
return "tf", nil
case BYTE:
return "i8", nil
case I16:
return "i16", nil
case I32:
return "i32", nil
case I64:
return "i64", nil
case DOUBLE:
return "dbl", nil
case STRING:
return "str", nil
case STRUCT:
return "rec", nil
case MAP:
return "map", nil
case SET:
return "set", nil
case LIST:
return "lst", nil
}
e := fmt.Errorf("Unknown fieldType: %d", int(fieldType))
return "", NewTProtocolExceptionWithType(INVALID_DATA, e)
}
func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) {
switch fieldType {
case "tf":
return TType(BOOL), nil
case "i8":
return TType(BYTE), nil
case "i16":
return TType(I16), nil
case "i32":
return TType(I32), nil
case "i64":
return TType(I64), nil
case "dbl":
return TType(DOUBLE), nil
case "str":
return TType(STRING), nil
case "rec":
return TType(STRUCT), nil
case "map":
return TType(MAP), nil
case "set":
return TType(SET), nil
case "lst":
return TType(LIST), nil
}
e := fmt.Errorf("Unknown type identifier: %s", fieldType)
return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e)
}
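An encoding sketch, assuming the vendored import path and the TMemoryBuffer transport from the next file: the message envelope comes out as a JSON list holding the protocol version, method name, message type, and sequence id.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	mem := thrift.NewTMemoryBuffer()
	proto := thrift.NewTJSONProtocol(mem)
	// Errors are ignored for brevity in this sketch.
	proto.WriteMessageBegin("add", thrift.CALL, 1)
	proto.WriteMessageEnd()
	proto.Flush()
	fmt.Println(mem.String()) // [1,"add",1,1]
}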

View File

@ -0,0 +1,79 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"bytes"
)
// Memory buffer-based implementation of the TTransport interface.
type TMemoryBuffer struct {
*bytes.Buffer
size int
}
type TMemoryBufferTransportFactory struct {
size int
}
func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) {
if trans != nil {
t, ok := trans.(*TMemoryBuffer)
if ok && t.size > 0 {
return NewTMemoryBufferLen(t.size), nil
}
}
return NewTMemoryBufferLen(p.size), nil
}
func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory {
return &TMemoryBufferTransportFactory{size: size}
}
func NewTMemoryBuffer() *TMemoryBuffer {
return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0}
}
func NewTMemoryBufferLen(size int) *TMemoryBuffer {
buf := make([]byte, 0, size)
return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size}
}
func (p *TMemoryBuffer) IsOpen() bool {
return true
}
func (p *TMemoryBuffer) Open() error {
return nil
}
func (p *TMemoryBuffer) Close() error {
p.Buffer.Reset()
return nil
}
// Flushing a memory buffer is a no-op
func (p *TMemoryBuffer) Flush() error {
return nil
}
func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) {
return uint64(p.Buffer.Len())
}
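A quick sketch, assuming the vendored import path: because TMemoryBuffer embeds *bytes.Buffer, ordinary buffer methods work alongside the TTransport interface.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	mem := thrift.NewTMemoryBufferLen(64)
	mem.WriteString("abc")            // embedded bytes.Buffer method
	fmt.Println(mem.RemainingBytes()) // 3
	fmt.Println(mem.String())         // abc
}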

View File

@ -0,0 +1,31 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// Message type constants in the Thrift protocol.
type TMessageType int32
const (
INVALID_TMESSAGE_TYPE TMessageType = 0
CALL TMessageType = 1
REPLY TMessageType = 2
EXCEPTION TMessageType = 3
ONEWAY TMessageType = 4
)

View File

@ -0,0 +1,139 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
/*
TMultiplexedProtocol is a protocol-independent concrete decorator
that allows a Thrift client to communicate with a multiplexing Thrift server,
by prepending the service name to the function name during function calls.
NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle requests
from a multiplexing client.
This example uses a single socket transport to invoke two services:
socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT)
transport := thrift.NewTFramedTransport(socket)
protocol := thrift.NewTBinaryProtocolTransport(transport)
mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator")
service := Calculator.NewCalculatorClient(mp)
mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport")
service2 := WeatherReport.NewWeatherReportClient(mp2)
err := transport.Open()
if err != nil {
t.Fatal("Unable to open client socket", err)
}
fmt.Println(service.Add(2,2))
fmt.Println(service2.GetTemperature())
*/
type TMultiplexedProtocol struct {
TProtocol
serviceName string
}
const MULTIPLEXED_SEPARATOR = ":"
func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol {
return &TMultiplexedProtocol{
TProtocol: protocol,
serviceName: serviceName,
}
}
func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error {
if typeId == CALL || typeId == ONEWAY {
return t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid)
} else {
return t.TProtocol.WriteMessageBegin(name, typeId, seqid)
}
}
/*
TMultiplexedProcessor is a TProcessor allowing
a single TServer to provide multiple services.
To do so, you instantiate the processor and then register additional
processors with it, as shown in the following example:
var processor = thrift.NewTMultiplexedProcessor()
processor.RegisterProcessor(
"Calculator",
Calculator.NewCalculatorProcessor(&CalculatorHandler{}),
)
processor.RegisterProcessor(
"WeatherReport",
WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}),
)
serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT)
if err != nil {
t.Fatal("Unable to create server socket", err)
}
server := thrift.NewTSimpleServer2(processor, serverTransport)
server.Serve();
*/
type TMultiplexedProcessor struct {
serviceProcessorMap map[string]TProcessor
DefaultProcessor TProcessor
}
func NewTMultiplexedProcessor() *TMultiplexedProcessor {
return &TMultiplexedProcessor{
serviceProcessorMap: make(map[string]TProcessor),
}
}
func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) {
t.DefaultProcessor = processor
}
func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) {
if t.serviceProcessorMap == nil {
t.serviceProcessorMap = make(map[string]TProcessor)
}
t.serviceProcessorMap[name] = processor
}
// Protocol that uses a stored message for ReadMessageBegin
type storedMessageProtocol struct {
TProtocol
name string
typeId TMessageType
seqid int32
}
func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol {
return &storedMessageProtocol{protocol, name, typeId, seqid}
}
func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) {
return s.name, s.typeId, s.seqid, nil
}

View File

@ -0,0 +1,53 @@
// +build go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"context"
"fmt"
"strings"
)
func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
name, typeId, seqid, err := in.ReadMessageBegin()
if err != nil {
return false, err
}
if typeId != CALL && typeId != ONEWAY {
return false, fmt.Errorf("Unexpected message type %v", typeId)
}
//extract the service name
v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
if len(v) != 2 {
if t.DefaultProcessor != nil {
smb := NewStoredMessageProtocol(in, name, typeId, seqid)
return t.DefaultProcessor.Process(ctx, smb, out)
}
return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
}
actualProcessor, ok := t.serviceProcessorMap[v[0]]
if !ok {
return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
}
smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
return actualProcessor.Process(ctx, smb, out)
}

View File

@ -0,0 +1,54 @@
// +build !go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"fmt"
"strings"
"golang.org/x/net/context"
)
func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) {
name, typeId, seqid, err := in.ReadMessageBegin()
if err != nil {
return false, err
}
if typeId != CALL && typeId != ONEWAY {
return false, fmt.Errorf("Unexpected message type %v", typeId)
}
//extract the service name
v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2)
if len(v) != 2 {
if t.DefaultProcessor != nil {
smb := NewStoredMessageProtocol(in, name, typeId, seqid)
return t.DefaultProcessor.Process(ctx, smb, out)
}
return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name)
}
actualProcessor, ok := t.serviceProcessorMap[v[0]]
if !ok {
return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0])
}
smb := NewStoredMessageProtocol(in, v[1], typeId, seqid)
return actualProcessor.Process(ctx, smb, out)
}

View File

@ -0,0 +1,164 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import (
"math"
"strconv"
)
type Numeric interface {
Int64() int64
Int32() int32
Int16() int16
Byte() byte
Int() int
Float64() float64
Float32() float32
String() string
isNull() bool
}
type numeric struct {
iValue int64
dValue float64
sValue string
isNil bool
}
var (
INFINITY Numeric
NEGATIVE_INFINITY Numeric
NAN Numeric
ZERO Numeric
NUMERIC_NULL Numeric
)
func NewNumericFromDouble(dValue float64) Numeric {
if math.IsInf(dValue, 1) {
return INFINITY
}
if math.IsInf(dValue, -1) {
return NEGATIVE_INFINITY
}
if math.IsNaN(dValue) {
return NAN
}
iValue := int64(dValue)
sValue := strconv.FormatFloat(dValue, 'g', 10, 64)
isNil := false
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromI64(iValue int64) Numeric {
dValue := float64(iValue)
// strconv, not string(): string(int64) would yield a rune, not a decimal representation
sValue := strconv.FormatInt(iValue, 10)
isNil := false
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromI32(iValue int32) Numeric {
dValue := float64(iValue)
sValue := strconv.FormatInt(int64(iValue), 10)
isNil := false
return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromString(sValue string) Numeric {
if sValue == INFINITY.String() {
return INFINITY
}
if sValue == NEGATIVE_INFINITY.String() {
return NEGATIVE_INFINITY
}
if sValue == NAN.String() {
return NAN
}
iValue, _ := strconv.ParseInt(sValue, 10, 64)
dValue, _ := strconv.ParseFloat(sValue, 64)
isNil := len(sValue) == 0
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil}
}
func NewNumericFromJSONString(sValue string, isNull bool) Numeric {
if isNull {
return NewNullNumeric()
}
if sValue == JSON_INFINITY {
return INFINITY
}
if sValue == JSON_NEGATIVE_INFINITY {
return NEGATIVE_INFINITY
}
if sValue == JSON_NAN {
return NAN
}
iValue, _ := strconv.ParseInt(sValue, 10, 64)
dValue, _ := strconv.ParseFloat(sValue, 64)
return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull}
}
func NewNullNumeric() Numeric {
return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true}
}
func (p *numeric) Int64() int64 {
return p.iValue
}
func (p *numeric) Int32() int32 {
return int32(p.iValue)
}
func (p *numeric) Int16() int16 {
return int16(p.iValue)
}
func (p *numeric) Byte() byte {
return byte(p.iValue)
}
func (p *numeric) Int() int {
return int(p.iValue)
}
func (p *numeric) Float64() float64 {
return p.dValue
}
func (p *numeric) Float32() float32 {
return float32(p.dValue)
}
func (p *numeric) String() string {
return p.sValue
}
func (p *numeric) isNull() bool {
return p.isNil
}
func init() {
INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false}
NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false}
NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false}
ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false}
NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true}
}
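A behavior sketch, assuming the vendored import path: each Numeric keeps integer, float, and string views of one value, and the Infinity/NaN singletons are matched by their string forms.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

func main() {
	n := thrift.NewNumericFromDouble(2.5)
	fmt.Println(n.Int64(), n.Float64(), n.String()) // 2 2.5 2.5

	inf := thrift.NewNumericFromString("Infinity")
	fmt.Println(inf.Float64()) // +Inf
}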

View File

@ -0,0 +1,50 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
///////////////////////////////////////////////////////////////////////////////
// This file is home to helpers that convert from various base types to
// respective pointer types. This is necessary because Go does not permit
// references to constants, nor can a pointer type to base type be allocated
// and initialized in a single expression.
//
// E.g., this is not allowed:
//
// var ip *int = &5
//
// But this *is* allowed:
//
// func IntPtr(i int) *int { return &i }
// var ip *int = IntPtr(5)
//
// Since pointers to base types are commonplace as [optional] fields in
// exported thrift structs, we factor such helpers here.
///////////////////////////////////////////////////////////////////////////////
func Float32Ptr(v float32) *float32 { return &v }
func Float64Ptr(v float64) *float64 { return &v }
func IntPtr(v int) *int { return &v }
func Int32Ptr(v int32) *int32 { return &v }
func Int64Ptr(v int64) *int64 { return &v }
func StringPtr(v string) *string { return &v }
func Uint32Ptr(v uint32) *uint32 { return &v }
func Uint64Ptr(v uint64) *uint64 { return &v }
func BoolPtr(v bool) *bool { return &v }
func ByteSlicePtr(v []byte) *[]byte { return &v }
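A usage sketch, assuming the vendored import path and a hypothetical struct standing in for generated code, where optional fields are modeled as pointers.

package main

import (
	"fmt"

	"github.com/apache/thrift/lib/go/thrift"
)

// profile is a hypothetical stand-in for a generated struct with an optional field.
type profile struct {
	Nickname *string
}

func main() {
	p := profile{Nickname: thrift.StringPtr("cadey")}
	if p.Nickname != nil {
		fmt.Println(*p.Nickname) // cadey
	}
}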

View File

@ -0,0 +1,26 @@
// +build !go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "golang.org/x/net/context"
var defaultCtx = context.Background()

View File

@ -0,0 +1,34 @@
// +build !go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "golang.org/x/net/context"
// A processor is a generic object which operates upon an input stream and
// writes to some output stream.
type TProcessor interface {
Process(ctx context.Context, in, out TProtocol) (bool, TException)
}
type TProcessorFunction interface {
Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
}

View File

@ -0,0 +1,58 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
// The default processor factory just returns a singleton
// instance.
type TProcessorFactory interface {
GetProcessor(trans TTransport) TProcessor
}
type tProcessorFactory struct {
processor TProcessor
}
func NewTProcessorFactory(p TProcessor) TProcessorFactory {
return &tProcessorFactory{processor: p}
}
func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor {
return p.processor
}
// The default processor function factory just returns a singleton
// instance.
type TProcessorFunctionFactory interface {
GetProcessorFunction(trans TTransport) TProcessorFunction
}
type tProcessorFunctionFactory struct {
processor TProcessorFunction
}
func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory {
return &tProcessorFunctionFactory{processor: p}
}
func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction {
return p.processor
}

View File

@ -0,0 +1,34 @@
// +build go1.7
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package thrift
import "context"
// A processor is a generic object which operates upon an input stream and
// writes to some output stream.
type TProcessor interface {
Process(ctx context.Context, in, out TProtocol) (bool, TException)
}
type TProcessorFunction interface {
Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException)
}
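Because TProcessor is a single-method interface, middleware-style decoration is straightforward. A logging sketch, assuming the vendored import path and the go1.7 build where Process takes a context.Context:

package main

import (
	"context"
	"log"

	"github.com/apache/thrift/lib/go/thrift"
)

// loggingProcessor wraps any TProcessor and logs each dispatched request.
type loggingProcessor struct {
	inner thrift.TProcessor
}

func (lp loggingProcessor) Process(ctx context.Context, in, out thrift.TProtocol) (bool, thrift.TException) {
	log.Println("thrift: processing request")
	return lp.inner.Process(ctx, in, out)
}

func main() {
	// Wrap an (empty) multiplexed processor just to show the composition.
	var p thrift.TProcessor = loggingProcessor{inner: thrift.NewTMultiplexedProcessor()}
	_ = p
}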

Some files were not shown because too many files have changed in this diff.