commit 931404890e

initial commit
@@ -0,0 +1,2 @@
.env
vyvanse
@@ -0,0 +1,198 @@
package bot

import (
	"context"
	"errors"
	"log"
	"strings"
	"sync"
	"time"

	"github.com/bwmarrin/discordgo"
	opentracing "github.com/opentracing/opentracing-go"
	otlog "github.com/opentracing/opentracing-go/log"
	"golang.org/x/time/rate"
)

var (
	ErrRateLimitExceeded = errors.New("bot: per-command rate limit exceeded")
)

type command struct {
	aliases  []string
	verb     string
	helptext string
}

func (c *command) Verb() string {
	return c.verb
}

func (c *command) Helptext() string {
	return c.helptext
}

// Handler is the type that bot command functions need to implement. Errors
// should be returned.
type Handler func(context.Context, *discordgo.Session, *discordgo.Message, []string) error

// CommandHandler is a generic interface for types that implement a bot
// command. It is akin to http.Handler, but more comprehensive.
type CommandHandler interface {
	Verb() string
	Helptext() string

	Handler(context.Context, *discordgo.Session, *discordgo.Message, []string) error
	Permissions(context.Context, *discordgo.Session, *discordgo.Message, []string) error
}

type basicCommand struct {
	*command
	handler     Handler
	permissions Handler
	limiter     *rate.Limiter
}

func (bc *basicCommand) Handler(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	return bc.handler(ctx, s, m, parv)
}

func (bc *basicCommand) Permissions(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	if !bc.limiter.Allow() {
		return ErrRateLimitExceeded
	}

	return bc.permissions(ctx, s, m, parv)
}

// The "default" command set, useful for simple bot projects.
var (
	DefaultCommandSet = NewCommandSet()
)

// Command handling errors.
var (
	ErrAlreadyExists     = errors.New("bot: command already exists")
	ErrNoSuchCommand     = errors.New("bot: no such command exists")
	ErrNoPermissions     = errors.New("bot: you do not have permissions for this command")
	ErrParvCountMismatch = errors.New("bot: parameter count mismatch")
)

// The default command prefix. Command `foo` becomes `.foo` in chat, etc.
const (
	DefaultPrefix = "."
)

// NewCommand creates an anonymous command and adds it to the default CommandSet.
func NewCommand(verb, helptext string, handler, permissions Handler) error {
	return DefaultCommandSet.Add(NewBasicCommand(verb, helptext, permissions, handler))
}

// NewBasicCommand creates a CommandHandler instance using the implementation
// functions supplied as arguments.
func NewBasicCommand(verb, helptext string, permissions, handler Handler) CommandHandler {
	return &basicCommand{
		command: &command{
			verb:     verb,
			helptext: helptext,
		},
		handler:     handler,
		permissions: permissions,
		limiter:     rate.NewLimiter(rate.Every(5*time.Second), 1),
	}
}

// CommandSet is a group of bot commands similar to an http.ServeMux.
type CommandSet struct {
	sync.Mutex
	cmds map[string]CommandHandler

	Prefix string
}

// NewCommandSet creates a new command set with the `help` command pre-loaded.
func NewCommandSet() *CommandSet {
	cs := &CommandSet{
		cmds:   map[string]CommandHandler{},
		Prefix: DefaultPrefix,
	}

	cs.AddCmd("help", "Shows help for the bot", NoPermissions, cs.help)

	return cs
}

// NoPermissions is a simple middleware function that allows all command invocations
// to pass the permissions check.
func NoPermissions(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	return nil
}

// AddCmd is syntactic sugar for cs.Add(NewBasicCommand(args...))
func (cs *CommandSet) AddCmd(verb, helptext string, permissions, handler Handler) error {
	return cs.Add(NewBasicCommand(verb, helptext, permissions, handler))
}

// Add adds a single command handler to the CommandSet. This can be done at runtime
// but it is suggested to only add commands on application boot.
func (cs *CommandSet) Add(h CommandHandler) error {
	cs.Lock()
	defer cs.Unlock()

	v := strings.ToLower(h.Verb())

	if _, ok := cs.cmds[v]; ok {
		return ErrAlreadyExists
	}

	cs.cmds[v] = h

	return nil
}

// Run makes a CommandSet compatible with discordgo event dispatching.
func (cs *CommandSet) Run(s *discordgo.Session, msg *discordgo.Message) error {
	cs.Lock()
	defer cs.Unlock()

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	sp, ctx := opentracing.StartSpanFromContext(ctx, "CommandSet.Run")
	defer sp.Finish()

	if strings.HasPrefix(msg.Content, cs.Prefix) {
		params := strings.Fields(msg.Content)
		verb := strings.ToLower(params[0][1:])

		sp.LogFields(
			otlog.String("message-id", msg.ID),
			otlog.String("author", msg.Author.ID),
			otlog.String("channel-id", msg.ChannelID),
			otlog.String("verb", verb),
			otlog.Int("parc", len(params)),
		)

		cmd, ok := cs.cmds[verb]
		if !ok {
			return ErrNoSuchCommand
		}

		err := cmd.Permissions(ctx, s, msg, params)
		if err != nil {
			sp.LogFields(otlog.Error(err))
			log.Printf("Permissions error: %s: %v", msg.Author.Username, err)
			s.ChannelMessageSend(msg.ChannelID, "You don't have permissions for that, sorry.")
			return ErrNoPermissions
		}

		err = cmd.Handler(ctx, s, msg, params)
		if err != nil {
			log.Printf("command handler error: %v", err)
			s.ChannelMessageSend(msg.ChannelID, "error when running that command: "+err.Error())
			return err
		}
	}

	return nil
}
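For reference, a hedged sketch of a custom permissions Handler that could be passed to AddCmd in place of NoPermissions (the owner-ID check is an illustrative assumption, not part of this commit). Whatever the permissions function decides, basicCommand.Permissions additionally enforces the one-invocation-per-five-seconds rate limiter above:

// Hypothetical sketch; not part of this commit.
// ownerOnly allows a command only for one hard-coded Discord user ID.
func ownerOnly(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	const ownerID = "0000000000000000" // placeholder user ID
	if m.Author.ID != ownerID {
		return errors.New("bot: owner only")
	}
	return nil
}

Registered as, for example, cs.AddCmd("restart", "Restarts the bot", ownerOnly, restartHandler), the check runs before the command handler and a failure is reported to the channel by CommandSet.Run.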
@@ -0,0 +1,10 @@
/*
Package bot contains some generically useful bot rigging for Discord chatbots.

This package works by defining command handlers in a CommandSet, and then dispatching
based on message contents. If the bot's command prefix is `;`, then `;foo` activates
the command handler for command `foo`.

A CommandSet has a mutex baked into it for convenience of command implementation.
*/
package bot
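Putting the package doc into practice, here is a hedged wiring sketch (not part of this commit): registering a command on the default set and dispatching every incoming message through it from discordgo's MessageCreate handler. The bot import path is assumed from $repo in box.rb, and the token environment variable name is an assumption.

// Hypothetical wiring sketch; not part of this commit.
package main

import (
	"context"
	"log"
	"os"

	"git.xeserv.us/xena/vyvanse/bot" // import path assumed from box.rb's $repo
	"github.com/bwmarrin/discordgo"
)

func main() {
	// Register a ".ping" command on the default command set (help is pre-loaded).
	err := bot.NewCommand("ping", "Replies with pong",
		func(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
			_, err := s.ChannelMessageSend(m.ChannelID, "pong")
			return err
		}, bot.NoPermissions)
	if err != nil {
		log.Fatal(err)
	}

	dg, err := discordgo.New("Bot " + os.Getenv("DISCORD_TOKEN")) // env var name is an assumption
	if err != nil {
		log.Fatal(err)
	}

	// Dispatch every incoming message through the command set.
	dg.AddHandler(func(s *discordgo.Session, m *discordgo.MessageCreate) {
		if m.Author.Bot {
			return
		}
		if err := bot.DefaultCommandSet.Run(s, m.Message); err != nil {
			log.Printf("command error: %v", err)
		}
	})

	if err := dg.Open(); err != nil {
		log.Fatal(err)
	}
	select {} // block forever
}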
@@ -0,0 +1,29 @@
package bot

import (
	"context"
	"fmt"

	"github.com/bwmarrin/discordgo"
)

func (cs *CommandSet) help(ctx context.Context, s *discordgo.Session, m *discordgo.Message, parv []string) error {
	switch len(parv) {
	case 1:
		// print all help on all commands
		result := "Bot commands: \n"

		for verb, cmd := range cs.cmds {
			result += fmt.Sprintf("%s%s: %s\n", cs.Prefix, verb, cmd.Helptext())
		}

		result += "If there's any problems please don't hesitate to ask a server admin for help."

		s.ChannelMessageSend(m.ChannelID, result)

	default:
		return ErrParvCountMismatch
	}

	return nil
}
@@ -0,0 +1,32 @@
$repo = "git.xeserv.us/xena/vyvanse"
$gover = "1.8.3"

from "xena/go-mini:#{$gover}"
run "go#{$gover} download"

def foldercopy(dir)
  copy "#{dir}", "/root/go/src/#{$repo}/#{dir}"
end

def gobuild(pkg)
  run "go#{$gover} build #{$repo}/#{pkg} && go#{$gover} install #{$repo}/#{pkg}"
end

folders = [
  "bot",
  "cmd",
  "vendor",
  "vendor-log"
]

folders.each { |x| foldercopy x }

gobuild "cmd/vyvanse"

cmd "/root/go/bin/vyvanse"

run "rm -rf $HOME/sdk /root/go/pkg"
run "apk del go#{$gover}"
flatten

tag "xena/vyvanse"
@@ -0,0 +1,7 @@
task up, "starts docker-compose services":
  exec "box box.rb"
  exec "docker-compose up -d"

task down, "stops docker-compose services":
  exec "docker-compose down"
@@ -0,0 +1,15 @@
version: "3"

services:
  zipkin:
    image: openzipkin/zipkin
    environment:
      STORAGE_TYPE: mem
    ports:
      - "9411:9411"

  vyvanse:
    image: xena/vyvanse
    env_file: ./.env
    depends_on:
      - zipkin
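The compose file stands up a Zipkin collector next to the bot, which is where the spans started in CommandSet.Run would be sent. A hedged sketch (not part of this commit) of registering that collector as the OpenTracing global tracer using the vendored openzipkin/zipkin-go-opentracing package; the collector URL matches the compose service name, while the service name, host:port, and exact option values are assumptions:

// Hypothetical tracer bootstrap; not part of this commit.
package main

import (
	"log"

	opentracing "github.com/opentracing/opentracing-go"
	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func setupTracing() {
	// Send spans to the "zipkin" service defined in docker-compose.yml.
	collector, err := zipkintracer.NewHTTPCollector("http://zipkin:9411/api/v1/spans")
	if err != nil {
		log.Fatal(err)
	}

	recorder := zipkintracer.NewRecorder(collector, false, "0.0.0.0:0", "vyvanse")
	tracer, err := zipkintracer.NewTracer(recorder)
	if err != nil {
		log.Fatal(err)
	}

	// Spans created via opentracing.StartSpanFromContext now reach Zipkin.
	opentracing.SetGlobalTracer(tracer)
}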
@@ -0,0 +1,41 @@
55b3f6a77f08038b1e82c3db3c1ce619e46de9b6 github.com/Shopify/sarama
905d0b5876706403fc746bc18dee25d2fecc2518 github.com/Xe/ln
ec64f23d236d7874e3b28ae86c833f57c7aa3389 github.com/apache/thrift/lib/go/thrift
d420e28024ad527390b43aa7f64e029083e11989 github.com/bwmarrin/discordgo
adab96458c51a58dc1783b3335dcce5461522e75 github.com/davecgh/go-spew/spew
b1fe83b5b03f624450823b751b662259ffc6af70 github.com/eapache/go-resiliency/breaker
bb955e01b9346ac19dc29eb16586c90ded99a98c github.com/eapache/go-xerial-snappy
44cc805cf13205b55f69e14bcb69867d1ae92f98 github.com/eapache/queue
390ab7935ee28ec6b286364bba9b4dd6410cb3d5 github.com/go-logfmt/logfmt
fcdc5011193ff531a548e9b0301828d5a5b97fd8 github.com/gogo/protobuf/proto
553a641470496b2327abcac10b36396bd98e45c9 github.com/golang/snappy
806455e841dcb0614891ad847500aaa6b10d3d1d github.com/google/gops/agent
806455e841dcb0614891ad847500aaa6b10d3d1d github.com/google/gops/internal
806455e841dcb0614891ad847500aaa6b10d3d1d github.com/google/gops/signal
a69d9f6de432e2c6b296a947d8a5ee88f68522cf github.com/gorilla/websocket
9d9ddadf44b4c17c42bafdc530ddeee1927c067d github.com/joho/godotenv
9d9ddadf44b4c17c42bafdc530ddeee1927c067d github.com/joho/godotenv/autoload
67f268f20922975c067ed799e4be6bacf152208c github.com/namsral/flag
9497d909de390b9a8dd0d88975cf0ce7d0b4d688 github.com/nishanths/go-xkcd
a52f2342449246d5bcc273e65cbdcfa5f7d6c63c github.com/opentracing-contrib/go-observer
8ebe5d4e236eed9fd88e593c288bfb804d630b8c github.com/opentracing/opentracing-go
8ebe5d4e236eed9fd88e593c288bfb804d630b8c github.com/opentracing/opentracing-go/ext
8ebe5d4e236eed9fd88e593c288bfb804d630b8c github.com/opentracing/opentracing-go/log
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/flag
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/scribe
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/thrift/gen-go/zipkincore
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/types
37e942825de0f846d15acc3bc9d027c9134a9b25 github.com/openzipkin/zipkin-go-opentracing/wire
5a3d2245f97fc249850e7802e3c01fad02a1c316 github.com/pierrec/lz4
a0006b13c722f7f12368c00a3d3c2ae8a999a0c6 github.com/pierrec/xxHash/xxHash32
c605e284fe17294bda444b34710735b29d1a9d90 github.com/pkg/errors
1f30fe9094a513ce4c700b9a54458bbb0c96996c github.com/rcrowley/go-metrics
736158dc09e10f1911ca3a1e1b01f11b566ce5db github.com/robfig/cron
42ff06aea7c329876e5a0fe94acc96902accf0ad golang.org/x/crypto/nacl/secretbox
42ff06aea7c329876e5a0fe94acc96902accf0ad golang.org/x/crypto/poly1305
42ff06aea7c329876e5a0fe94acc96902accf0ad golang.org/x/crypto/salsa20/salsa
f5079bd7f6f74e23c4d65efa0f4ce14cbd6a3c0f golang.org/x/net/context
f51c12702a4d776e4c1fa9b0fabab841babae631 golang.org/x/time/rate
ae77be60afb1dcacde03767a8c37337fad28ac14 github.com/kardianos/osext
6a18b51d929caddbe795e1609195dee1d1cc729e github.com/justinian/dice
@@ -0,0 +1,24 @@
package sarama

type ApiVersionsRequest struct {
}

func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
	return nil
}

func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
	return nil
}

func (r *ApiVersionsRequest) key() int16 {
	return 18
}

func (r *ApiVersionsRequest) version() int16 {
	return 0
}

func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
@@ -0,0 +1,87 @@
package sarama

type ApiVersionsResponseBlock struct {
	ApiKey     int16
	MinVersion int16
	MaxVersion int16
}

func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
	pe.putInt16(b.ApiKey)
	pe.putInt16(b.MinVersion)
	pe.putInt16(b.MaxVersion)
	return nil
}

func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
	var err error

	if b.ApiKey, err = pd.getInt16(); err != nil {
		return err
	}

	if b.MinVersion, err = pd.getInt16(); err != nil {
		return err
	}

	if b.MaxVersion, err = pd.getInt16(); err != nil {
		return err
	}

	return nil
}

type ApiVersionsResponse struct {
	Err         KError
	ApiVersions []*ApiVersionsResponseBlock
}

func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
	pe.putInt16(int16(r.Err))
	if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
		return err
	}
	for _, apiVersion := range r.ApiVersions {
		if err := apiVersion.encode(pe); err != nil {
			return err
		}
	}
	return nil
}

func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
	kerr, err := pd.getInt16()
	if err != nil {
		return err
	}

	r.Err = KError(kerr)

	numBlocks, err := pd.getArrayLength()
	if err != nil {
		return err
	}

	r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
	for i := 0; i < numBlocks; i++ {
		block := new(ApiVersionsResponseBlock)
		if err := block.decode(pd); err != nil {
			return err
		}
		r.ApiVersions[i] = block
	}

	return nil
}

func (r *ApiVersionsResponse) key() int16 {
	return 18
}

func (r *ApiVersionsResponse) version() int16 {
	return 0
}

func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
	return V0_10_0_0
}
@@ -0,0 +1,904 @@
package sarama

import (
	"fmt"
	"sync"
	"time"

	"github.com/eapache/go-resiliency/breaker"
	"github.com/eapache/queue"
)

// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
// and parses responses for errors. You must read from the Errors() channel or the
// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
// leaks: it will not be garbage-collected automatically when it passes out of
// scope.
type AsyncProducer interface {

	// AsyncClose triggers a shutdown of the producer. The shutdown has completed
	// when both the Errors and Successes channels have been closed. When calling
	// AsyncClose, you *must* continue to read from those channels in order to
	// drain the results of any messages in flight.
	AsyncClose()

	// Close shuts down the producer and waits for any buffered messages to be
	// flushed. You must call this function before a producer object passes out of
	// scope, as it may otherwise leak memory. You must call this before calling
	// Close on the underlying client.
	Close() error

	// Input is the input channel for the user to write messages to that they
	// wish to send.
	Input() chan<- *ProducerMessage

	// Successes is the success output channel back to the user when Return.Successes is
	// enabled. If Return.Successes is true, you MUST read from this channel or the
	// Producer will deadlock. It is suggested that you send and read messages
	// together in a single select statement.
	Successes() <-chan *ProducerMessage

	// Errors is the error output channel back to the user. You MUST read from this
	// channel or the Producer will deadlock when the channel is full. Alternatively,
	// you can set Producer.Return.Errors in your config to false, which prevents
	// errors to be returned.
	Errors() <-chan *ProducerError
}

type asyncProducer struct {
	client    Client
	conf      *Config
	ownClient bool

	errors                    chan *ProducerError
	input, successes, retries chan *ProducerMessage
	inFlight                  sync.WaitGroup

	brokers    map[*Broker]chan<- *ProducerMessage
	brokerRefs map[chan<- *ProducerMessage]int
	brokerLock sync.Mutex
}

// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
	client, err := NewClient(addrs, conf)
	if err != nil {
		return nil, err
	}

	p, err := NewAsyncProducerFromClient(client)
	if err != nil {
		return nil, err
	}
	p.(*asyncProducer).ownClient = true
	return p, nil
}

// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
// necessary to call Close() on the underlying client when shutting down this producer.
func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
	// Check that we are not dealing with a closed Client before processing any other arguments
	if client.Closed() {
		return nil, ErrClosedClient
	}

	p := &asyncProducer{
		client:     client,
		conf:       client.Config(),
		errors:     make(chan *ProducerError),
		input:      make(chan *ProducerMessage),
		successes:  make(chan *ProducerMessage),
		retries:    make(chan *ProducerMessage),
		brokers:    make(map[*Broker]chan<- *ProducerMessage),
		brokerRefs: make(map[chan<- *ProducerMessage]int),
	}

	// launch our singleton dispatchers
	go withRecover(p.dispatcher)
	go withRecover(p.retryHandler)

	return p, nil
}

type flagSet int8

const (
	syn      flagSet = 1 << iota // first message from partitionProducer to brokerProducer
	fin                          // final message from partitionProducer to brokerProducer and back
	shutdown                     // start the shutdown process
)

// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
type ProducerMessage struct {
	Topic string // The Kafka topic for this message.
	// The partitioning key for this message. Pre-existing Encoders include
	// StringEncoder and ByteEncoder.
	Key Encoder
	// The actual message to store in Kafka. Pre-existing Encoders include
	// StringEncoder and ByteEncoder.
	Value Encoder

	// This field is used to hold arbitrary data you wish to include so it
	// will be available when receiving on the Successes and Errors channels.
	// Sarama completely ignores this field and is only to be used for
	// pass-through data.
	Metadata interface{}

	// Below this point are filled in by the producer as the message is processed

	// Offset is the offset of the message stored on the broker. This is only
	// guaranteed to be defined if the message was successfully delivered and
	// RequiredAcks is not NoResponse.
	Offset int64
	// Partition is the partition that the message was sent to. This is only
	// guaranteed to be defined if the message was successfully delivered.
	Partition int32
	// Timestamp is the timestamp assigned to the message by the broker. This
	// is only guaranteed to be defined if the message was successfully
	// delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
	// least version 0.10.0.
	Timestamp time.Time

	retries int
	flags   flagSet
}

const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.

func (m *ProducerMessage) byteSize() int {
	size := producerMessageOverhead
	if m.Key != nil {
		size += m.Key.Length()
	}
	if m.Value != nil {
		size += m.Value.Length()
	}
	return size
}

func (m *ProducerMessage) clear() {
	m.flags = 0
	m.retries = 0
}

// ProducerError is the type of error generated when the producer fails to deliver a message.
// It contains the original ProducerMessage as well as the actual error value.
type ProducerError struct {
	Msg *ProducerMessage
	Err error
}

func (pe ProducerError) Error() string {
	return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
}

// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
// when closing a producer.
type ProducerErrors []*ProducerError

func (pe ProducerErrors) Error() string {
	return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
}

func (p *asyncProducer) Errors() <-chan *ProducerError {
	return p.errors
}

func (p *asyncProducer) Successes() <-chan *ProducerMessage {
	return p.successes
}

func (p *asyncProducer) Input() chan<- *ProducerMessage {
	return p.input
}

func (p *asyncProducer) Close() error {
	p.AsyncClose()

	if p.conf.Producer.Return.Successes {
		go withRecover(func() {
			for range p.successes {
			}
		})
	}

	var errors ProducerErrors
	if p.conf.Producer.Return.Errors {
		for event := range p.errors {
			errors = append(errors, event)
		}
	} else {
		<-p.errors
	}

	if len(errors) > 0 {
		return errors
	}
	return nil
}

func (p *asyncProducer) AsyncClose() {
	go withRecover(p.shutdown)
}

// singleton
// dispatches messages by topic
func (p *asyncProducer) dispatcher() {
	handlers := make(map[string]chan<- *ProducerMessage)
	shuttingDown := false

	for msg := range p.input {
		if msg == nil {
			Logger.Println("Something tried to send a nil message, it was ignored.")
			continue
		}

		if msg.flags&shutdown != 0 {
			shuttingDown = true
			p.inFlight.Done()
			continue
		} else if msg.retries == 0 {
			if shuttingDown {
				// we can't just call returnError here because that decrements the wait group,
				// which hasn't been incremented yet for this message, and shouldn't be
				pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
				if p.conf.Producer.Return.Errors {
					p.errors <- pErr
				} else {
					Logger.Println(pErr)
				}
				continue
			}
			p.inFlight.Add(1)
		}

		if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
			p.returnError(msg, ErrMessageSizeTooLarge)
			continue
		}

		handler := handlers[msg.Topic]
		if handler == nil {
			handler = p.newTopicProducer(msg.Topic)
			handlers[msg.Topic] = handler
		}

		handler <- msg
	}

	for _, handler := range handlers {
		close(handler)
	}
}

// one per topic
// partitions messages, then dispatches them by partition
type topicProducer struct {
	parent *asyncProducer
	topic  string
	input  <-chan *ProducerMessage

	breaker     *breaker.Breaker
	handlers    map[int32]chan<- *ProducerMessage
	partitioner Partitioner
}

func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
	tp := &topicProducer{
		parent:      p,
		topic:       topic,
		input:       input,
		breaker:     breaker.New(3, 1, 10*time.Second),
		handlers:    make(map[int32]chan<- *ProducerMessage),
		partitioner: p.conf.Producer.Partitioner(topic),
	}
	go withRecover(tp.dispatch)
	return input
}

func (tp *topicProducer) dispatch() {
	for msg := range tp.input {
		if msg.retries == 0 {
			if err := tp.partitionMessage(msg); err != nil {
				tp.parent.returnError(msg, err)
				continue
			}
		}

		handler := tp.handlers[msg.Partition]
		if handler == nil {
			handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
			tp.handlers[msg.Partition] = handler
		}

		handler <- msg
	}

	for _, handler := range tp.handlers {
		close(handler)
	}
}

func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
	var partitions []int32

	err := tp.breaker.Run(func() (err error) {
		if tp.partitioner.RequiresConsistency() {
			partitions, err = tp.parent.client.Partitions(msg.Topic)
		} else {
			partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
		}
		return
	})

	if err != nil {
		return err
	}

	numPartitions := int32(len(partitions))

	if numPartitions == 0 {
		return ErrLeaderNotAvailable
	}

	choice, err := tp.partitioner.Partition(msg, numPartitions)

	if err != nil {
		return err
	} else if choice < 0 || choice >= numPartitions {
		return ErrInvalidPartition
	}

	msg.Partition = partitions[choice]

	return nil
}

// one per partition per topic
// dispatches messages to the appropriate broker
// also responsible for maintaining message order during retries
type partitionProducer struct {
	parent    *asyncProducer
	topic     string
	partition int32
	input     <-chan *ProducerMessage

	leader  *Broker
	breaker *breaker.Breaker
	output  chan<- *ProducerMessage

	// highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
	// all other messages get buffered in retryState[msg.retries].buf to preserve ordering
	// retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
	// therefore whether our buffer is complete and safe to flush)
	highWatermark int
	retryState    []partitionRetryState
}

type partitionRetryState struct {
	buf          []*ProducerMessage
	expectChaser bool
}

func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
	input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
	pp := &partitionProducer{
		parent:    p,
		topic:     topic,
		partition: partition,
		input:     input,

		breaker:    breaker.New(3, 1, 10*time.Second),
		retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
	}
	go withRecover(pp.dispatch)
	return input
}

func (pp *partitionProducer) dispatch() {
	// try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
	// on the first message
	pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
	if pp.leader != nil {
		pp.output = pp.parent.getBrokerProducer(pp.leader)
		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
		pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
	}

	for msg := range pp.input {
		if msg.retries > pp.highWatermark {
			// a new, higher, retry level; handle it and then back off
			pp.newHighWatermark(msg.retries)
			time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
		} else if pp.highWatermark > 0 {
			// we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
			if msg.retries < pp.highWatermark {
				// in fact this message is not even the current retry level, so buffer it for now (unless it's a just a fin)
				if msg.flags&fin == fin {
					pp.retryState[msg.retries].expectChaser = false
					pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
				} else {
					pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
				}
				continue
			} else if msg.flags&fin == fin {
				// this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
				// meaning this retry level is done and we can go down (at least) one level and flush that
				pp.retryState[pp.highWatermark].expectChaser = false
				pp.flushRetryBuffers()
				pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
				continue
			}
		}

		// if we made it this far then the current msg contains real data, and can be sent to the next goroutine
		// without breaking any of our ordering guarantees

		if pp.output == nil {
			if err := pp.updateLeader(); err != nil {
				pp.parent.returnError(msg, err)
				time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
				continue
			}
			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
		}

		pp.output <- msg
	}

	if pp.output != nil {
		pp.parent.unrefBrokerProducer(pp.leader, pp.output)
	}
}

func (pp *partitionProducer) newHighWatermark(hwm int) {
	Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
	pp.highWatermark = hwm

	// send off a fin so that we know when everything "in between" has made it
	// back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
	pp.retryState[pp.highWatermark].expectChaser = true
	pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
	pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}

	// a new HWM means that our current broker selection is out of date
	Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
	pp.parent.unrefBrokerProducer(pp.leader, pp.output)
	pp.output = nil
}

func (pp *partitionProducer) flushRetryBuffers() {
	Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
	for {
		pp.highWatermark--

		if pp.output == nil {
			if err := pp.updateLeader(); err != nil {
				pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
				goto flushDone
			}
			Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
		}

		for _, msg := range pp.retryState[pp.highWatermark].buf {
			pp.output <- msg
		}

	flushDone:
		pp.retryState[pp.highWatermark].buf = nil
		if pp.retryState[pp.highWatermark].expectChaser {
			Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
			break
		} else if pp.highWatermark == 0 {
			Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
			break
		}
	}
}

func (pp *partitionProducer) updateLeader() error {
	return pp.breaker.Run(func() (err error) {
		if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
			return err
		}

		if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
			return err
		}

		pp.output = pp.parent.getBrokerProducer(pp.leader)
		pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
		pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}

		return nil
	})
}

// one per broker; also constructs an associated flusher
func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
	var (
		input     = make(chan *ProducerMessage)
		bridge    = make(chan *produceSet)
		responses = make(chan *brokerProducerResponse)
	)

	bp := &brokerProducer{
		parent:         p,
		broker:         broker,
		input:          input,
		output:         bridge,
		responses:      responses,
		buffer:         newProduceSet(p),
		currentRetries: make(map[string]map[int32]error),
	}
	go withRecover(bp.run)

	// minimal bridge to make the network response `select`able
	go withRecover(func() {
		for set := range bridge {
			request := set.buildRequest()

			response, err := broker.Produce(request)

			responses <- &brokerProducerResponse{
				set: set,
				err: err,
				res: response,
			}
		}
		close(responses)
	})

	return input
}

type brokerProducerResponse struct {
	set *produceSet
	err error
	res *ProduceResponse
}

// groups messages together into appropriately-sized batches for sending to the broker
// handles state related to retries etc
type brokerProducer struct {
	parent *asyncProducer
	broker *Broker

	input     <-chan *ProducerMessage
	output    chan<- *produceSet
	responses <-chan *brokerProducerResponse

	buffer     *produceSet
	timer      <-chan time.Time
	timerFired bool

	closing        error
	currentRetries map[string]map[int32]error
}

func (bp *brokerProducer) run() {
	var output chan<- *produceSet
	Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())

	for {
		select {
		case msg := <-bp.input:
			if msg == nil {
				bp.shutdown()
				return
			}

			if msg.flags&syn == syn {
				Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
					bp.broker.ID(), msg.Topic, msg.Partition)
				if bp.currentRetries[msg.Topic] == nil {
					bp.currentRetries[msg.Topic] = make(map[int32]error)
				}
				bp.currentRetries[msg.Topic][msg.Partition] = nil
				bp.parent.inFlight.Done()
				continue
			}

			if reason := bp.needsRetry(msg); reason != nil {
				bp.parent.retryMessage(msg, reason)

				if bp.closing == nil && msg.flags&fin == fin {
					// we were retrying this partition but we can start processing again
					delete(bp.currentRetries[msg.Topic], msg.Partition)
					Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
						bp.broker.ID(), msg.Topic, msg.Partition)
				}

				continue
			}

			if bp.buffer.wouldOverflow(msg) {
				if err := bp.waitForSpace(msg); err != nil {
					bp.parent.retryMessage(msg, err)
					continue
				}
			}

			if err := bp.buffer.add(msg); err != nil {
				bp.parent.returnError(msg, err)
				continue
			}

			if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
				bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
			}
		case <-bp.timer:
			bp.timerFired = true
		case output <- bp.buffer:
			bp.rollOver()
		case response := <-bp.responses:
			bp.handleResponse(response)
		}

		if bp.timerFired || bp.buffer.readyToFlush() {
			output = bp.output
		} else {
			output = nil
		}
	}
}

func (bp *brokerProducer) shutdown() {
	for !bp.buffer.empty() {
		select {
		case response := <-bp.responses:
			bp.handleResponse(response)
		case bp.output <- bp.buffer:
			bp.rollOver()
		}
	}
	close(bp.output)
	for response := range bp.responses {
		bp.handleResponse(response)
	}

	Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
}

func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
	if bp.closing != nil {
		return bp.closing
	}

	return bp.currentRetries[msg.Topic][msg.Partition]
}

func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
	Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())

	for {
		select {
		case response := <-bp.responses:
			bp.handleResponse(response)
			// handling a response can change our state, so re-check some things
			if reason := bp.needsRetry(msg); reason != nil {
				return reason
			} else if !bp.buffer.wouldOverflow(msg) {
				return nil
			}
		case bp.output <- bp.buffer:
			bp.rollOver()
			return nil
		}
	}
}

func (bp *brokerProducer) rollOver() {
	bp.timer = nil
	bp.timerFired = false
	bp.buffer = newProduceSet(bp.parent)
}

func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
	if response.err != nil {
		bp.handleError(response.set, response.err)
	} else {
		bp.handleSuccess(response.set, response.res)
	}

	if bp.buffer.empty() {
		bp.rollOver() // this can happen if the response invalidated our buffer
	}
}

func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
	// we iterate through the blocks in the request set, not the response, so that we notice
	// if the response is missing a block completely
	sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
		if response == nil {
			// this only happens when RequiredAcks is NoResponse, so we have to assume success
			bp.parent.returnSuccesses(msgs)
			return
		}

		block := response.GetBlock(topic, partition)
		if block == nil {
			bp.parent.returnErrors(msgs, ErrIncompleteResponse)
			return
		}

		switch block.Err {
		// Success
		case ErrNoError:
			if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
				for _, msg := range msgs {
					msg.Timestamp = block.Timestamp
				}
			}
			for i, msg := range msgs {
				msg.Offset = block.Offset + int64(i)
			}
			bp.parent.returnSuccesses(msgs)
		// Retriable errors
		case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
			ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
			Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
				bp.broker.ID(), topic, partition, block.Err)
			bp.currentRetries[topic][partition] = block.Err
			bp.parent.retryMessages(msgs, block.Err)
			bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
		// Other non-retriable errors
		default:
			bp.parent.returnErrors(msgs, block.Err)
		}
	})
}

func (bp *brokerProducer) handleError(sent *produceSet, err error) {
	switch err.(type) {
	case PacketEncodingError:
		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.returnErrors(msgs, err)
		})
	default:
		Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
		bp.parent.abandonBrokerConnection(bp.broker)
		_ = bp.broker.Close()
		bp.closing = err
		sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.retryMessages(msgs, err)
		})
		bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
			bp.parent.retryMessages(msgs, err)
		})
		bp.rollOver()
	}
}

// singleton
// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
func (p *asyncProducer) retryHandler() {
	var msg *ProducerMessage
	buf := queue.New()

	for {
		if buf.Length() == 0 {
			msg = <-p.retries
		} else {
			select {
			case msg = <-p.retries:
			case p.input <- buf.Peek().(*ProducerMessage):
				buf.Remove()
				continue
			}
		}

		if msg == nil {
			return
		}

		buf.Add(msg)
	}
}

// utility functions

func (p *asyncProducer) shutdown() {
	Logger.Println("Producer shutting down.")
	p.inFlight.Add(1)
	p.input <- &ProducerMessage{flags: shutdown}

	p.inFlight.Wait()

	if p.ownClient {
		err := p.client.Close()
		if err != nil {
			Logger.Println("producer/shutdown failed to close the embedded client:", err)
		}
	}

	close(p.input)
	close(p.retries)
	close(p.errors)
	close(p.successes)
}

func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
	msg.clear()
	pErr := &ProducerError{Msg: msg, Err: err}
	if p.conf.Producer.Return.Errors {
		p.errors <- pErr
	} else {
		Logger.Println(pErr)
	}
	p.inFlight.Done()
}

func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
	for _, msg := range batch {
		p.returnError(msg, err)
	}
}

func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
	for _, msg := range batch {
		if p.conf.Producer.Return.Successes {
			msg.clear()
			p.successes <- msg
		}
		p.inFlight.Done()
	}
}

func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
	if msg.retries >= p.conf.Producer.Retry.Max {
		p.returnError(msg, err)
	} else {
		msg.retries++
		p.retries <- msg
	}
}

func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
	for _, msg := range batch {
		p.retryMessage(msg, err)
	}
}

func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	bp := p.brokers[broker]

	if bp == nil {
		bp = p.newBrokerProducer(broker)
		p.brokers[broker] = bp
		p.brokerRefs[bp] = 0
	}

	p.brokerRefs[bp]++

	return bp
}

func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	p.brokerRefs[bp]--
	if p.brokerRefs[bp] == 0 {
		close(bp)
		delete(p.brokerRefs, bp)

		if p.brokers[broker] == bp {
			delete(p.brokers, broker)
		}
	}
}

func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
	p.brokerLock.Lock()
	defer p.brokerLock.Unlock()

	delete(p.brokers, broker)
}
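For orientation, the usage pattern the AsyncProducer comments above demand (read from Errors, and from Successes when enabled, or the producer deadlocks) looks roughly like the following sketch; the broker address, topic, and message value are placeholders, not part of this commit:

// Hypothetical usage sketch of the vendored producer; not part of this commit.
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.Return.Successes = true // opt in to the Successes channel

	producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}

	msg := &sarama.ProducerMessage{Topic: "events", Value: sarama.StringEncoder("hello")}

	// Send and read results in one select loop, as the interface docs suggest.
	sent := false
	for !sent {
		select {
		case producer.Input() <- msg:
			sent = true
		case err := <-producer.Errors():
			log.Printf("produce failed: %v", err)
		}
	}

	// Wait for the delivery report, then shut down.
	select {
	case m := <-producer.Successes():
		log.Printf("delivered to partition %d at offset %d", m.Partition, m.Offset)
	case err := <-producer.Errors():
		log.Printf("produce failed: %v", err)
	}

	if err := producer.Close(); err != nil {
		log.Printf("close: %v", err)
	}
}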
@@ -0,0 +1,685 @@
package sarama

import (
	"crypto/tls"
	"encoding/binary"
	"fmt"
	"io"
	"net"
	"strconv"
	"sync"
	"sync/atomic"
	"time"

	"github.com/rcrowley/go-metrics"
)

// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
type Broker struct {
	id   int32
	addr string

	conf          *Config
	correlationID int32
	conn          net.Conn
	connErr       error
	lock          sync.Mutex
	opened        int32

	responses chan responsePromise
	done      chan bool

	incomingByteRate       metrics.Meter
	requestRate            metrics.Meter
	requestSize            metrics.Histogram
	requestLatency         metrics.Histogram
	outgoingByteRate       metrics.Meter
	responseRate           metrics.Meter
	responseSize           metrics.Histogram
	brokerIncomingByteRate metrics.Meter
	brokerRequestRate      metrics.Meter
	brokerRequestSize      metrics.Histogram
	brokerRequestLatency   metrics.Histogram
	brokerOutgoingByteRate metrics.Meter
	brokerResponseRate     metrics.Meter
	brokerResponseSize     metrics.Histogram
}

type responsePromise struct {
	requestTime   time.Time
	correlationID int32
	packets       chan []byte
	errors        chan error
}

// NewBroker creates and returns a Broker targeting the given host:port address.
// This does not attempt to actually connect, you have to call Open() for that.
func NewBroker(addr string) *Broker {
	return &Broker{id: -1, addr: addr}
}

// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
// waiting for the connection to complete. This means that any subsequent operations on the broker will
// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
func (b *Broker) Open(conf *Config) error {
	if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
		return ErrAlreadyConnected
	}

	if conf == nil {
		conf = NewConfig()
	}

	err := conf.Validate()
	if err != nil {
		return err
	}

	b.lock.Lock()

	go withRecover(func() {
		defer b.lock.Unlock()

		dialer := net.Dialer{
			Timeout:   conf.Net.DialTimeout,
			KeepAlive: conf.Net.KeepAlive,
		}

		if conf.Net.TLS.Enable {
			b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
		} else {
			b.conn, b.connErr = dialer.Dial("tcp", b.addr)
		}
		if b.connErr != nil {
			Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
			b.conn = nil
			atomic.StoreInt32(&b.opened, 0)
			return
		}
		b.conn = newBufConn(b.conn)

		b.conf = conf

		// Create or reuse the global metrics shared between brokers
		b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
		b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
		b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
		b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
		b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
		b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
		b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
		// Do not gather metrics for seeded broker (only used during bootstrap) because they share
		// the same id (-1) and are already exposed through the global metrics above
		if b.id >= 0 {
			b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
			b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
			b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
			b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
			b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
			b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
			b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
		}

		if conf.Net.SASL.Enable {
			b.connErr = b.sendAndReceiveSASLPlainAuth()
			if b.connErr != nil {
				err = b.conn.Close()
				if err == nil {
					Logger.Printf("Closed connection to broker %s\n", b.addr)
				} else {
					Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
				}
				b.conn = nil
				atomic.StoreInt32(&b.opened, 0)
				return
			}
		}

		b.done = make(chan bool)
		b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)

		if b.id >= 0 {
			Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
		} else {
			Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
		}
		go withRecover(b.responseReceiver)
	})

	return nil
}

// Connected returns true if the broker is connected and false otherwise. If the broker is not
// connected but it had tried to connect, the error from that connection attempt is also returned.