From 35f0a86c2ca1f815b9156c941130991c7ea63d60 Mon Sep 17 00:00:00 2001 From: Christine Dodrill Date: Fri, 18 Aug 2017 00:34:41 -0700 Subject: [PATCH] initial commit --- .dockerignore | 1 + Dockerfile | 16 + LICENSE | 19 + README.md | 25 + cmd/kronos/main.go | 140 + cmd/mercyd/main.go | 7 + cmd/worker/main.go | 80 + docker-compose.yml | 50 + internal/common/nats.go | 30 + internal/common/rqlite.go | 27 + internal/common/types.go | 43 + internal/database/database.go | 1 + internal/database/events.go | 195 ++ internal/database/events_test.go | 40 + vendor-log | 8 + vendor/github.com/Xe/gorqlite/api.go | 202 ++ vendor/github.com/Xe/gorqlite/cluster.go | 221 ++ vendor/github.com/Xe/gorqlite/conn.go | 303 ++ vendor/github.com/Xe/gorqlite/gorqlite.go | 187 ++ .../Xe/gorqlite/prepared_statement.go | 54 + vendor/github.com/Xe/gorqlite/query.go | 396 +++ vendor/github.com/Xe/gorqlite/write.go | 179 + vendor/github.com/Xe/uuid/dce.go | 84 + vendor/github.com/Xe/uuid/doc.go | 8 + vendor/github.com/Xe/uuid/hash.go | 53 + vendor/github.com/Xe/uuid/node.go | 101 + vendor/github.com/Xe/uuid/time.go | 132 + vendor/github.com/Xe/uuid/util.go | 43 + vendor/github.com/Xe/uuid/uuid.go | 163 + vendor/github.com/Xe/uuid/version1.go | 41 + vendor/github.com/Xe/uuid/version4.go | 25 + vendor/github.com/caarlos0/env/env.go | 285 ++ vendor/github.com/nats-io/go-nats/context.go | 166 + vendor/github.com/nats-io/go-nats/enc.go | 249 ++ .../go-nats/encoders/builtin/default_enc.go | 106 + .../go-nats/encoders/builtin/gob_enc.go | 34 + .../go-nats/encoders/builtin/json_enc.go | 45 + vendor/github.com/nats-io/go-nats/nats.go | 2975 +++++++++++++++++ vendor/github.com/nats-io/go-nats/netchan.go | 100 + vendor/github.com/nats-io/go-nats/parser.go | 470 +++ vendor/github.com/nats-io/go-nats/timer.go | 43 + vendor/github.com/nats-io/go-nats/util/tls.go | 37 + .../nats-io/go-nats/util/tls_pre17.go | 35 + vendor/github.com/nats-io/nuid/nuid.go | 124 + .../github.com/robfig/cron/constantdelay.go | 27 + 
vendor/github.com/robfig/cron/cron.go | 259 ++ vendor/github.com/robfig/cron/doc.go | 129 + vendor/github.com/robfig/cron/parser.go | 380 +++ vendor/github.com/robfig/cron/spec.go | 158 + 49 files changed, 8496 insertions(+) create mode 100644 .dockerignore create mode 100644 Dockerfile create mode 100644 LICENSE create mode 100644 README.md create mode 100644 cmd/kronos/main.go create mode 100644 cmd/mercyd/main.go create mode 100644 cmd/worker/main.go create mode 100644 docker-compose.yml create mode 100644 internal/common/nats.go create mode 100644 internal/common/rqlite.go create mode 100644 internal/common/types.go create mode 100644 internal/database/database.go create mode 100644 internal/database/events.go create mode 100644 internal/database/events_test.go create mode 100644 vendor-log create mode 100644 vendor/github.com/Xe/gorqlite/api.go create mode 100644 vendor/github.com/Xe/gorqlite/cluster.go create mode 100644 vendor/github.com/Xe/gorqlite/conn.go create mode 100644 vendor/github.com/Xe/gorqlite/gorqlite.go create mode 100644 vendor/github.com/Xe/gorqlite/prepared_statement.go create mode 100644 vendor/github.com/Xe/gorqlite/query.go create mode 100644 vendor/github.com/Xe/gorqlite/write.go create mode 100644 vendor/github.com/Xe/uuid/dce.go create mode 100644 vendor/github.com/Xe/uuid/doc.go create mode 100644 vendor/github.com/Xe/uuid/hash.go create mode 100644 vendor/github.com/Xe/uuid/node.go create mode 100644 vendor/github.com/Xe/uuid/time.go create mode 100644 vendor/github.com/Xe/uuid/util.go create mode 100644 vendor/github.com/Xe/uuid/uuid.go create mode 100644 vendor/github.com/Xe/uuid/version1.go create mode 100644 vendor/github.com/Xe/uuid/version4.go create mode 100644 vendor/github.com/caarlos0/env/env.go create mode 100644 vendor/github.com/nats-io/go-nats/context.go create mode 100644 vendor/github.com/nats-io/go-nats/enc.go create mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go create mode 100644 
vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go create mode 100644 vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go create mode 100644 vendor/github.com/nats-io/go-nats/nats.go create mode 100644 vendor/github.com/nats-io/go-nats/netchan.go create mode 100644 vendor/github.com/nats-io/go-nats/parser.go create mode 100644 vendor/github.com/nats-io/go-nats/timer.go create mode 100644 vendor/github.com/nats-io/go-nats/util/tls.go create mode 100644 vendor/github.com/nats-io/go-nats/util/tls_pre17.go create mode 100644 vendor/github.com/nats-io/nuid/nuid.go create mode 100644 vendor/github.com/robfig/cron/constantdelay.go create mode 100644 vendor/github.com/robfig/cron/cron.go create mode 100644 vendor/github.com/robfig/cron/doc.go create mode 100644 vendor/github.com/robfig/cron/parser.go create mode 100644 vendor/github.com/robfig/cron/spec.go diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..2eea525 --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +.env \ No newline at end of file diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..dd80259 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,16 @@ +FROM xena/alpine + +RUN apk add go build-base # TODO: move to bottom RUN segment + +ADD ./vendor /go/src/git.xeserv.us/xena/mercy/vendor +ADD ./internal /go/src/git.xeserv.us/xena/mercy/internal +ADD ./cmd /go/src/git.xeserv.us/xena/mercy/cmd + +ENV GOPATH /go +ENV CGO_ENABLED 0 + +RUN mkdir -p /go/bin && cd /go/bin \ + && go build git.xeserv.us/xena/mercy/cmd/worker \ + && go build git.xeserv.us/xena/mercy/cmd/kronos \ + && go build git.xeserv.us/xena/mercy/cmd/mercyd + #&& apk del go build-base diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..fb06e42 --- /dev/null +++ b/LICENSE @@ -0,0 +1,19 @@ +Copyright (c) 2017 Christine Dodrill + +This software is provided 'as-is', without any express or implied +warranty. 
In no event will the authors be held liable for any damages +arising from the use of this software. + +Permission is granted to anyone to use this software for any purpose, +including commercial applications, and to alter it and redistribute it +freely, subject to the following restrictions: + +1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgement in the product documentation would be + appreciated but is not required. + +2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + +3. This notice may not be removed or altered from any source distribution. \ No newline at end of file diff --git a/README.md b/README.md new file mode 100644 index 0000000..5681091 --- /dev/null +++ b/README.md @@ -0,0 +1,25 @@ +mercy +===== + +> The wonders of modern cluster administration + +This is basically an experiment in microservices and rqlite to solve a problem +that has been solved before, but this is different because it's simpler and +theoretically much easier to scale. + +## Components + +- `worker` + workers wait for incoming health check work and execute it, returning the results + to the `results` queue. +- `kronos` + kronos schedules work and records the results of checks. +- `mercyd` + mercyd is the gRPC server for control RPC. +- `mercy` + mercy is a snazzy little command line application for talking to mercyd. + +## Rationale + +This is a simpler, easier to understand implementation of something I am sure +has been written to death. 
diff --git a/cmd/kronos/main.go b/cmd/kronos/main.go new file mode 100644 index 0000000..b4a9163 --- /dev/null +++ b/cmd/kronos/main.go @@ -0,0 +1,140 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "log" + "net/http" + "os" + "strconv" + "time" + + "git.xeserv.us/xena/mercy/internal/common" + "git.xeserv.us/xena/mercy/internal/database" + "github.com/Xe/gorqlite" + "github.com/Xe/uuid" + "github.com/caarlos0/env" + nats "github.com/nats-io/go-nats" + "github.com/robfig/cron" +) + +type config struct { + NatsURL string `env:"NATS_URL,required"` + DatabaseURL string `env:"DATABASE_URL,required"` + Debug bool `env:"DEBUG"` +} + +func main() { + var cfg config + err := env.Parse(&cfg) + if err != nil { + log.Fatal(err) + } + + nc, err := common.NatsConnect(cfg.NatsURL) + if err != nil { + log.Fatal(err) + } + _ = nc + + log.Printf("connecting to %s", cfg.DatabaseURL) + db, err := common.RQLiteConnect(cfg.DatabaseURL) + if err != nil { + log.Fatal(err) + } + + if cfg.Debug { + gorqlite.TraceOn(os.Stderr) + } + + chks := database.NewChecks(db) + err = chks.Migrate() + if err != nil { + log.Fatal(err) + } + + resuls := database.NewResults(db) + err = resuls.Migrate() + if err != nil { + log.Fatal(err) + } + + nc.QueueSubscribe("results", "kronos", func(m *nats.Msg) { + var cr common.CheckResult + err := json.Unmarshal(m.Data, &cr) + if err != nil { + log.Printf("results: error when decoding json: %v", err) + return + } + + err = resuls.InsResult(cr) + if err != nil { + log.Printf("results: error when inserting result record: %v", err) + return + } + + cid, err := strconv.ParseInt(cr.Preamble.CheckID, 10, 64) + if err != nil { + log.Printf("results: %s is not a number: %v", cr.Preamble.CheckID, err) + return + } + + chk, err := chks.GetCheck(cid) + if err != nil { + log.Printf("results: can't get check: %v", err) + return + } + + _, err = http.Post(chk.ReportWebhook, "application/json", bytes.NewBuffer(m.Data)) + if err != nil { + log.Printf("results: 
http.Post(%q): %v", chk.ReportWebhook, err) + return + } + + err = chks.UpdateLastResult(cid, cr.Result) + if err != nil { + log.Printf("results: updating last check result for cid %d: %v", cid, err) + return + } + }) + + c := cron.New() + c.AddFunc("@every 1m", func() { + log.Printf("scheduling checks") + cl, err := chks.GetAllChecks() + if err != nil { + log.Printf("getAllChecks: %v", err) + return + } + + for _, chk := range cl { + go func() { + chr := common.HTTPCheckRequest{ + Preamble: common.Preamble{ + CustomerID: chk.CustomerID, + CheckID: fmt.Sprintf("%d", chk.ID), + RunID: uuid.New(), + }, + URL: chk.URI, + DegradedThreshold: 500 * time.Millisecond, + FailThreshold: 5 * time.Second, + } + data, err := json.Marshal(&chr) + if err != nil { + log.Printf("error in json-encoding check request %#v: %v", chr, err) + return + } + err = nc.Publish("tasks:http", data) + if err != nil { + log.Printf("error in sending nats request checkID: %d: %v", chk.ID, err) + } + }() + } + }) + c.Start() + + for { + select {} + } +} diff --git a/cmd/mercyd/main.go b/cmd/mercyd/main.go new file mode 100644 index 0000000..5e3344f --- /dev/null +++ b/cmd/mercyd/main.go @@ -0,0 +1,7 @@ +package main + +func main() { + for { + select {} + } +} diff --git a/cmd/worker/main.go b/cmd/worker/main.go new file mode 100644 index 0000000..532b5ba --- /dev/null +++ b/cmd/worker/main.go @@ -0,0 +1,80 @@ +package main + +import ( + "context" + "encoding/json" + "log" + "net/http" + "os" + "time" + + "git.xeserv.us/xena/mercy/internal/common" + "github.com/nats-io/go-nats" +) + +func main() { + uri := os.Getenv("NATS_URL") + nc, err := common.NatsConnect(uri) + if err != nil { + log.Fatal("Error establishing connection to NATS:", err) + } + + log.Printf("Connected to NATS at: %s", nc.ConnectedUrl()) + + nc.QueueSubscribe("tasks:http", "worker", func(m *nats.Msg) { + var hc common.HTTPCheckRequest + err := json.Unmarshal(m.Data, &hc) + if err != nil { + log.Printf("tasks:http: error when decoding 
json: %v", err) + return + } + + req, err := http.NewRequest(http.MethodGet, hc.URL, nil) + if err != nil { + log.Printf("tasks:http: error when creating request: %v", err) + return + } + ctx, cancel := context.WithTimeout(context.Background(), hc.FailThreshold) + defer cancel() + + req = req.WithContext(ctx) + + result := common.CheckResult{Preamble: hc.Preamble} + before := time.Now() + + resp, err := http.DefaultClient.Do(req) + if err != nil { + switch err { + case context.Canceled: + result.Result = common.Failed + default: + log.Printf("tasks:http: error when fetching url %s: %v", hc.URL, err) + result.ErrData = err.Error() + } + } + + if resp.StatusCode/100 == 2 { + result.Result = common.OK + } + + if now := time.Now(); before.Add(hc.DegradedThreshold).Before(now) { + result.Result = common.Degraded + } + + result.Timestamp = time.Now() + data, err := json.Marshal(&result) + if err != nil { + log.Printf("tasks:http: error encoding json for %#v: %v", result, err) + return + } + + err = nc.Publish("results", data) + if err != nil { + log.Printf("tasks:http: error publishing results: %v", err) + } + }) + + for { + select {} + } +} diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..312a1ab --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,50 @@ +version: "3" + +services: + rqlite: + image: rqlite/rqlite:4.0.2 + volumes: + - rqlite:/rqlite/file + command: -on-disk -http-adv-addr rqlite:4001 + + nats: + image: nats:1.0.2 + entrypoint: "/gnatsd -DV" # uncomment for debugging message queue issues + expose: + - "4222" + ports: + - "8222:8222" + + worker: + image: xena/mercy:$GIT_COMMIT + environment: + NATS_URL: nats://nats:4222 + command: /go/bin/worker + depends_on: + - nats + + kronos: + image: xena/mercy:$GIT_COMMIT + depends_on: + - nats + - rqlite + environment: + NATS_URL: nats://nats:4222 + DATABASE_URL: http://rqlite:4001 + command: /go/bin/kronos + + mercyd: + image: xena/mercy:$GIT_COMMIT + environment: + NATS_URL: 
nats://nats:4222 + DATABASE_URL: http://rqlite:4001 + command: /go/bin/mercyd + ports: + - "42069:42069" + #- "433:433" # uncomment for production, used for SSL certs via Let's Encrypt + depends_on: + - nats + - rqlite + +volumes: + rqlite: diff --git a/internal/common/nats.go b/internal/common/nats.go new file mode 100644 index 0000000..53daf8e --- /dev/null +++ b/internal/common/nats.go @@ -0,0 +1,30 @@ +package common + +import ( + "log" + "time" + + "github.com/nats-io/go-nats" +) + +func NatsConnect(u string) (*nats.Conn, error) { + var ( + nc *nats.Conn + err error + ) + + for i := 0; i < 5; i++ { + nc, err = nats.Connect(u) + if err == nil { + break + } + log.Printf("sleeping 5 seconds to try again, got error connecting to %s: %v", u, err) + + time.Sleep(5 * time.Second) + } + + if err != nil { + return nil, err + } + return nc, nil +} diff --git a/internal/common/rqlite.go b/internal/common/rqlite.go new file mode 100644 index 0000000..fd61f4e --- /dev/null +++ b/internal/common/rqlite.go @@ -0,0 +1,27 @@ +package common + +import ( + "log" + "time" + + "github.com/Xe/gorqlite" +) + +func RQLiteConnect(u string) (gorqlite.Connection, error) { + var ( + db gorqlite.Connection + err error + ) + + for i := 0; i < 5; i++ { + db, err = gorqlite.Open(u) + if err == nil { + break + } + log.Printf("sleeping 1 second to try again, got error connecting to %s: %v", u, err) + + time.Sleep(time.Second) + } + + return db, err +} diff --git a/internal/common/types.go b/internal/common/types.go new file mode 100644 index 0000000..1eabb4b --- /dev/null +++ b/internal/common/types.go @@ -0,0 +1,43 @@ +package common + +import "time" + +type Result int + +const ( + Unknown Result = iota + OK + Degraded + Failed +) + +type Preamble struct { + CustomerID string `json:"customer_id"` + CheckID string `json:"check_id"` + RunID string `json:"run_id"` +} + +// HTTPCheckRequest +type HTTPCheckRequest struct { + Preamble Preamble `json:"preamble"` + URL string `json:"url"` + 
DegradedThreshold time.Duration `json:"degraded_threshold"` + FailThreshold time.Duration `json:"fail_threshold"` +} + +type CheckResult struct { + Preamble Preamble `json:"preamble"` + Timestamp time.Time `json:"timestamp"` + Result Result `json:"result"` + ErrData string `json:"errdata,omitempty"` +} + +type Check struct { + ID int64 `json:"id"` + Timestamp time.Time `json:"timestamp"` + CustomerID string `json:"string"` + Active bool `json:"active"` + URI string `json:"uri"` + ReportWebhook string `json:"report_webhook"` + LastResult Result `json:"last_result"` +} diff --git a/internal/database/database.go b/internal/database/database.go new file mode 100644 index 0000000..636bab8 --- /dev/null +++ b/internal/database/database.go @@ -0,0 +1 @@ +package database diff --git a/internal/database/events.go b/internal/database/events.go new file mode 100644 index 0000000..9571b6c --- /dev/null +++ b/internal/database/events.go @@ -0,0 +1,195 @@ +package database + +import ( + "database/sql" + "log" + "time" + + "git.xeserv.us/xena/mercy/internal/common" + "github.com/Xe/gorqlite" +) + +type Results struct { + conn gorqlite.Connection + + insResult gorqlite.PreparedStatement + getResultsForCustomer gorqlite.PreparedStatement +} + +func NewResults(conn gorqlite.Connection) *Results { + return &Results{ + conn: conn, + + insResult: gorqlite.NewPreparedStatement("INSERT INTO results (timestamp, customer_id, check_id, run_id, result, error_msg) VALUES (%d, %s, %s, %s, %d, %s)"), + getResultsForCustomer: gorqlite.NewPreparedStatement("SELECT * FROM results WHERE customer_id=%s"), + } +} + +func (r *Results) Migrate() error { + ddl := []string{ + `CREATE TABLE IF NOT EXISTS results (id INTEGER PRIMARY KEY, timestamp INTEGER, customer_id TEXT NOT NULL, check_id TEXT NOT NULL, run_id TEXT NOT NULL, result INTEGER NOT NULL, error_msg TEXT)`, + } + + _, err := r.conn.Write(ddl) + return err +} + +func (r *Results) InsResult(cr common.CheckResult) error { + _, err := 
r.conn.WriteOne(r.insResult.Bind(cr.Timestamp.Unix(), cr.Preamble.CustomerID, cr.Preamble.CheckID, cr.Preamble.RunID, cr.Result, cr.ErrData)) + return err +} + +func (r *Results) GetResultsForCustomer(cid string) ([]common.CheckResult, error) { + var result []common.CheckResult + res, err := r.conn.QueryOne(r.getResultsForCustomer.Bind(cid)) + if err != nil { + return nil, err + } + + if res.NumRows() == 0 { + return nil, sql.ErrNoRows + } + + for res.Next() { + var cr common.CheckResult + var ts int64 + var checkResult int64 + err = res.Scan(&cr.Preamble.CheckID, &ts, &cr.Preamble.CustomerID, &cr.Preamble.CheckID, &cr.Preamble.RunID, &checkResult, &cr.ErrData) + if err != nil { + return nil, err + } + + cr.Timestamp = time.Unix(ts, 0) + cr.Result = common.Result(checkResult) + + result = append(result, cr) + } + + return result, nil +} + +type Checks struct { + conn gorqlite.Connection + + insCheck gorqlite.PreparedStatement + getCheck gorqlite.PreparedStatement + getAllChecks gorqlite.PreparedStatement + getChecksForCustomer gorqlite.PreparedStatement + activeCheck gorqlite.PreparedStatement + deactiveCheck gorqlite.PreparedStatement + removeCheck gorqlite.PreparedStatement + updateLastRes gorqlite.PreparedStatement +} + +func NewChecks(conn gorqlite.Connection) *Checks { + return &Checks{ + conn: conn, + insCheck: gorqlite.NewPreparedStatement("INSERT INTO checks (timestamp, customer_id, active, uri, report_webhook, last_result) VALUES (%d, %s, %d, %s, %s, 0)"), + getCheck: gorqlite.NewPreparedStatement("SELECT * FROM checks WHERE id=%d"), + getAllChecks: gorqlite.NewPreparedStatement("SELECT id, timestamp, customer_id, active, uri, report_webhook, last_result FROM checks WHERE active=1"), + getChecksForCustomer: gorqlite.NewPreparedStatement("SELECT * FROM CHECKS WHERE customer_id=%s"), + activeCheck: gorqlite.NewPreparedStatement("UPDATE checks SET active=1 WHERE id=%d"), + + deactiveCheck: gorqlite.NewPreparedStatement("UPDATE checks SET active=0 WHERE 
id=%d"), + removeCheck: gorqlite.NewPreparedStatement("DELETE FROM checks WHERE id=%d"), + updateLastRes: gorqlite.NewPreparedStatement("UPDATE checks SET last_result=%d WHERE id=%d"), + } +} + +func (c *Checks) Migrate() error { + ddl := []string{ + `CREATE TABLE IF NOT EXISTS checks (id INTEGER PRIMARY KEY, timestamp INTEGER, customer_id TEXT, active INTEGER, uri TEXT, report_webhook TEXT, last_result INTEGER)`, + } + + _, err := c.conn.Write(ddl) + return err +} + +func (c *Checks) InsertCheck(ch *common.Check) error { + var active int + if ch.Active { + active = 1 + } + + res, err := c.conn.WriteOne(c.insCheck.Bind(ch.Timestamp.Unix(), ch.CustomerID, active, ch.URI, ch.ReportWebhook)) + if err != nil { + return err + } + + ch.ID = res.LastInsertID + return nil +} + +func (c *Checks) GetAllChecks() ([]common.Check, error) { + var result []common.Check + q := c.getAllChecks.Bind() + log.Println(q) + res, err := c.conn.QueryOne(q) + if err != nil { + return nil, err + } + if res.Err != nil { + return nil, err + } + + if res.NumRows() == 0 { + return nil, sql.ErrNoRows + } + + for res.Next() { + var ch common.Check + var chid int64 + var ts int64 + var act int64 + var lres int64 + err = res.Scan(&chid, &ts, &ch.CustomerID, &act, &ch.URI, &ch.ReportWebhook, &lres) + if err != nil { + return nil, err + } + + ch.ID = int64(chid) + ch.Timestamp = time.Unix(int64(ts), 0) + ch.Active = act == 1 + ch.LastResult = common.Result(lres) + + result = append(result, ch) + } + + return result, nil +} + +func (c *Checks) GetCheck(cid int64) (*common.Check, error) { + var ch common.Check + + res, err := c.conn.QueryOne(c.getCheck.Bind(cid)) + if err != nil { + return nil, err + } + + if res.NumRows() == 0 { + return nil, sql.ErrNoRows + } + + if !res.Next() { + return nil, sql.ErrNoRows + } + + var ts int64 + var act int64 + var lres int64 + err = res.Scan(&ch.ID, &ts, &ch.CustomerID, &act, &ch.URI, &ch.ReportWebhook, &lres) + if err != nil { + return nil, err + } + + ch.Timestamp 
= time.Unix(ts, 0) + ch.Active = act == 1 + ch.LastResult = common.Result(lres) + + return &ch, nil +} + +func (c *Checks) UpdateLastResult(cid int64, result common.Result) error { + _, err := c.conn.WriteOne(c.updateLastRes.Bind(int64(result), cid)) + return err +} diff --git a/internal/database/events_test.go b/internal/database/events_test.go new file mode 100644 index 0000000..111e2b6 --- /dev/null +++ b/internal/database/events_test.go @@ -0,0 +1,40 @@ +package database + +import ( + "os" + "testing" + "time" + + "git.xeserv.us/xena/mercy/internal/common" + "github.com/Xe/gorqlite" +) + +func TestChecks(t *testing.T) { + gorqlite.TraceOn(os.Stderr) + db, err := gorqlite.Open("http://") + if err != nil { + t.Fatal(err) + } + + chks := NewChecks(db) + err = chks.Migrate() + if err != nil { + t.Fatal(err) + } + + err = chks.InsertCheck(&common.Check{ + Timestamp: time.Now(), + CustomerID: "aliens", + Active: true, + URI: "https://cetacean.club", + ReportWebhook: "https://google.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err := chks.GetAllChecks() + if err != nil { + t.Fatal(err) + } +} diff --git a/vendor-log b/vendor-log new file mode 100644 index 0000000..3f89411 --- /dev/null +++ b/vendor-log @@ -0,0 +1,8 @@ +b4479c874d87db74c2049a1b4abd55eb162c70fb github.com/nats-io/go-nats +b4479c874d87db74c2049a1b4abd55eb162c70fb github.com/nats-io/go-nats/encoders/builtin +b4479c874d87db74c2049a1b4abd55eb162c70fb github.com/nats-io/go-nats/util +3cf34f9fca4e88afa9da8eabd75e3326c9941b44 github.com/nats-io/nuid +0cf029d5748c52beb2c9d20c81880cb4bdf8f788 github.com/caarlos0/env +edf576cd33fb6773ed3042186ba689ced21c55b1 github.com/Xe/gorqlite +736158dc09e10f1911ca3a1e1b01f11b566ce5db github.com/robfig/cron +62b230097e9c9534ca2074782b25d738c4b68964 github.com/Xe/uuid diff --git a/vendor/github.com/Xe/gorqlite/api.go b/vendor/github.com/Xe/gorqlite/api.go new file mode 100644 index 0000000..9373377 --- /dev/null +++ b/vendor/github.com/Xe/gorqlite/api.go @@ -0,0 
+1,202 @@ +package gorqlite + +/* + this file has low level stuff: + + rqliteApiGet() + rqliteApiPost() + + There is some code duplication between those and they should + probably be combined into one function. + + nothing public here. + +*/ + +import "bytes" +import "encoding/json" +import "errors" +import "fmt" +import "io/ioutil" +import "net/http" +import "time" + +/* ***************************************************************** + + method: rqliteApiGet() - for api_STATUS + + - lowest level interface - does not do any JSON unmarshaling + - handles retries + - handles timeouts + + * *****************************************************************/ + +func (conn *Connection) rqliteApiGet(apiOp apiOperation) ([]byte, error) { + var responseBody []byte + trace("%s: rqliteApiGet() called",conn.ID) + + // only api_STATUS now - maybe someday BACKUP + if ( apiOp != api_STATUS ) { + return responseBody, errors.New("rqliteApiGet() called for invalid api operation") + } + + // just to be safe, check this + peersToTry := conn.cluster.makePeerList() + if ( len(peersToTry) < 1 ) { + return responseBody, errors.New("I don't have any cluster info") + } + trace("%s: I have a peer list %d peers long",conn.ID,len(peersToTry)) + + // failure log is used so that if all peers fail, we can say something + // about why each failed + failureLog := make([]string,0) + +PeerLoop: + for peerNum, peerToTry := range peersToTry { + trace("%s: attemping to contact peer %d",conn.ID,peerNum) + // docs say default GET policy is up to 10 follows automatically + url := conn.assembleURL(api_STATUS,peerToTry) + req, err := http.NewRequest("GET",url,nil) + if ( err != nil ) { + trace("%s: got error '%s' doing http.NewRequest", conn.ID,err.Error()) + failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error())) + continue PeerLoop + } + trace("%s: http.NewRequest() OK") + req.Header.Set("Content-Type","application/json") + client := &http.Client{} + client.Timeout = 
time.Duration(conn.timeout) * time.Second + response, err := client.Do(req) + if ( err != nil ) { + trace("%s: got error '%s' doing client.Do", conn.ID,err.Error()) + failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error())) + continue PeerLoop + } + defer response.Body.Close() + trace("%s: client.Do() OK") + responseBody, err := ioutil.ReadAll(response.Body) + if ( err != nil ) { + trace("%s: got error '%s' doing ioutil.ReadAll", conn.ID,err.Error()) + failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error())) + continue PeerLoop + } + trace("%s: ioutil.ReadAll() OK") + if ( response.Status != "200 OK" ) { + trace("%s: got code %s",conn.ID,response.Status) + failureLog = append(failureLog,fmt.Sprintf("%s failed, got: %s",url,response.Status)) + continue PeerLoop + } + // if we got here, we succeeded + trace("%s: api call OK, returning",conn.ID) + return responseBody, nil + } + + // if we got here, all peers failed. Let's build a verbose error message + var stringBuffer bytes.Buffer + stringBuffer.WriteString("tried all peers unsuccessfully. here are the results:\n") + for n, v := range failureLog { + stringBuffer.WriteString(fmt.Sprintf(" peer #%d: %s\n",n,v)) + } + return responseBody, errors.New(stringBuffer.String()) +} + +/* ***************************************************************** + + method: rqliteApiPost() - for api_QUERY and api_WRITE + + - lowest level interface - does not do any JSON unmarshaling + - handles 301s, etc. + - handles retries + - handles timeouts + + it is called with an apiOperation type because the URL it will use varies + depending on the API operation type (api_QUERY vs. 
api_WRITE) + + * *****************************************************************/ + +func (conn *Connection) rqliteApiPost (apiOp apiOperation, sqlStatements []string) ([]byte, error) { + var responseBody []byte + + switch (apiOp) { + case api_QUERY: + trace("%s: rqliteApiGet() post called for a QUERY of %d statements",conn.ID,len(sqlStatements)) + case api_WRITE: + trace("%s: rqliteApiGet() post called for a QUERY of %d statements",conn.ID,len(sqlStatements)) + default: + return responseBody, errors.New("weird! called for an invalid apiOperation in rqliteApiPost()") + } + + // jsonify the statements. not really needed in the + // case of api_STATUS but doesn't hurt + + jStatements , err := json.Marshal(sqlStatements) + if ( err != nil ) { return nil, err } + + // just to be safe, check this + peersToTry := conn.cluster.makePeerList() + if ( len(peersToTry) < 1 ) { + return responseBody, errors.New("I don't have any cluster info") + } + + // failure log is used so that if all peers fail, we can say something + // about why each failed + failureLog := make([]string,0) + +PeerLoop: + for peerNum, peer := range peersToTry { + trace("%s: trying peer #%d",conn.ID,peerNum) + + // we're doing a post, and the RFCs say that if you get a 301, it's not + // automatically followed, so we have to do that ourselves + + responseStatus := "Haven't Tried Yet" + var url string + for ( responseStatus == "Haven't Tried Yet" || responseStatus == "301 Moved Permanently" ) { + url = conn.assembleURL(apiOp,peer) + req, err := http.NewRequest("POST",url,bytes.NewBuffer(jStatements)) + if ( err != nil ) { + trace("%s: got error '%s' doing http.NewRequest", conn.ID,err.Error()) + failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error())) + continue PeerLoop + } + req.Header.Set("Content-Type","application/json") + client := &http.Client{} + response, err := client.Do(req) + if ( err != nil ) { + trace("%s: got error '%s' doing client.Do", conn.ID,err.Error()) + 
failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error())) + continue PeerLoop + } + defer response.Body.Close() + responseBody, err = ioutil.ReadAll(response.Body) + if ( err != nil ) { + trace("%s: got error '%s' doing ioutil.ReadAll", conn.ID,err.Error()) + failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error())) + continue PeerLoop + } + responseStatus = response.Status + if ( responseStatus == "301 Moved Permanently" ) { + v := response.Header["Location"] + failureLog = append(failureLog,fmt.Sprintf("%s redirected me to %s",url,v[0])) + url = v[0] + continue PeerLoop + } else if ( responseStatus == "200 OK" ) { + trace("%s: api call OK, returning",conn.ID) + return responseBody, nil + } else { + trace("%s: got error in responseStatus: %s", conn.ID, responseStatus) + failureLog = append(failureLog,fmt.Sprintf("%s failed, got: %s",url,response.Status)) + continue PeerLoop + } + } + } + + // if we got here, all peers failed. Let's build a verbose error message + var stringBuffer bytes.Buffer + stringBuffer.WriteString("tried all peers unsuccessfully. 
here are the results:\n") + for n, v := range failureLog { + stringBuffer.WriteString(fmt.Sprintf(" peer #%d: %s\n",n,v)) + } + return responseBody, errors.New(stringBuffer.String()) +} + diff --git a/vendor/github.com/Xe/gorqlite/cluster.go b/vendor/github.com/Xe/gorqlite/cluster.go new file mode 100644 index 0000000..b3928d2 --- /dev/null +++ b/vendor/github.com/Xe/gorqlite/cluster.go @@ -0,0 +1,221 @@ +package gorqlite + +/* + this file holds most of the cluster-related stuff: + + types: + peer + rqliteCluster + Connection methods: + assembleURL (from a peer) + updateClusterInfo (does the full cluster discovery via status) +*/ + +/* ***************************************************************** + + imports + + * *****************************************************************/ + +import "bytes" +import "encoding/json" +import "errors" +import "fmt" +import "strings" + +//import "os" +//import "reflect" + +/* ***************************************************************** + + type: peer + + this is an internal type to abstact peer info. + + note that hostname is sometimes used for "has this struct been + inialized" checks. + + * *****************************************************************/ + + +type peer struct { + hostname string // hostname or "localhost" + port string // "4001" or port, only ever used as a string +} + +func (p *peer) String() string { + return fmt.Sprintf("%s:%s",p.hostname,p.port) +} + +/* ***************************************************************** + + type: rqliteCluster + + internal type that abstracts the full cluster state (leader, peers) + + * *****************************************************************/ + +type rqliteCluster struct { + leader peer + otherPeers []peer + conn *Connection +} + +/* ***************************************************************** + + method: rqliteCluster.makePeerList() + + in the api calls, we'll want to try the leader first, then the other + peers. 
to make looping easy, this function returns a list of peers + in the order the try them: leader, other peer, other peer, etc. + + * *****************************************************************/ + +func (rc *rqliteCluster) makePeerList() []peer { + trace("%s: makePeerList() called",rc.conn.ID) + var peerList []peer + peerList = append(peerList,rc.leader) + for _, p := range rc.otherPeers { + peerList = append(peerList,p) + } + + trace("%s: makePeerList() returning this list:",rc.conn.ID) + for n, v := range peerList { + trace("%s: makePeerList() peer %d -> %s",rc.conn.ID,n,v.hostname + ":" + v.port) + } + + return peerList +} + +/* ***************************************************************** + + method: Connection.assembleURL() + + tell it what peer to talk to and what kind of API operation you're + making, and it will return the full URL, from start to finish. + e.g.: + + https://mary:secret2@server1.example.com:1234/db/query?transaction&level=strong + + note: this func needs to live at the Connection level because the + Connection holds the username, password, consistencyLevel, etc. 
+ + * *****************************************************************/ + +func (conn *Connection) assembleURL(apiOp apiOperation, p peer) string { + var stringBuffer bytes.Buffer + + if ( conn.wantsHTTPS == true ) { + stringBuffer.WriteString("https") + } else { + stringBuffer.WriteString("http") + } + stringBuffer.WriteString("://") + if ( conn.username != "" && conn.password != "" ) { + stringBuffer.WriteString(conn.username) + stringBuffer.WriteString(":") + stringBuffer.WriteString(conn.password) + stringBuffer.WriteString("@") + } + stringBuffer.WriteString(p.hostname) + stringBuffer.WriteString(":") + stringBuffer.WriteString(p.port) + + switch apiOp { + case api_STATUS: + stringBuffer.WriteString("/status") + case api_QUERY: + stringBuffer.WriteString("/db/query") + case api_WRITE: + stringBuffer.WriteString("/db/execute") + } + + if ( apiOp == api_QUERY || apiOp == api_WRITE ) { + stringBuffer.WriteString("?timings&transaction&level=") + stringBuffer.WriteString(consistencyLevelNames[conn.consistencyLevel]) + } + + switch apiOp { + case api_QUERY: + trace("%s: assembled URL for an api_QUERY: %s",conn.ID,stringBuffer.String()) + case api_STATUS: + trace("%s: assembled URL for an api_STATUS: %s",conn.ID,stringBuffer.String()) + case api_WRITE: + trace("%s: assembled URL for an api_WRITE: %s",conn.ID,stringBuffer.String()) + } + + return stringBuffer.String() +} + +/* ***************************************************************** + + method: Connection.updateClusterInfo() + + upon invocation, updateClusterInfo() completely erases and refreshes + the Connection's cluster info, replacing its rqliteCluster object + with current info. + + the web heavy lifting (retrying, etc.) 
is done in rqliteApiGet() + + * *****************************************************************/ + +func (conn *Connection) updateClusterInfo() error { + trace("%s: updateClusterInfo() called",conn.ID) + + // start with a fresh new cluster + var rc rqliteCluster + rc.conn = conn + + responseBody, err := conn.rqliteApiGet(api_STATUS) + if ( err != nil ) { return err } + trace("%s: updateClusterInfo() back from api call OK",conn.ID) + + sections := make(map[string]interface{}) + err = json.Unmarshal(responseBody,§ions) + if ( err != nil ) { return err } + sMap := sections["store"].(map[string]interface{}) + leaderRaftAddr := sMap["leader"].(string) + trace("%s: leader from store section is %s",conn.ID,leaderRaftAddr) + + // leader in this case is the RAFT address + // we want the HTTP address, so we'll use this as + // a key as we sift through APIPeers + + meta := sMap["meta"].(map[string]interface{}) + apiPeers := meta["APIPeers"].(map[string]interface{}) + + for raftAddr, httpAddr := range apiPeers { + trace("%s: examining httpAddr %s",conn.ID,httpAddr) + + /* httpAddr are usually hostname:port */ + var p peer + parts := strings.Split(httpAddr.(string),":") + p.hostname = parts[0] + p.port = parts[1] + + // so is this the leader? 
+ if ( leaderRaftAddr == raftAddr ) { + trace ("%s: found leader at %s",conn.ID,httpAddr) + rc.leader = p + } else { + rc.otherPeers = append(rc.otherPeers, p) + } + } + + if ( rc.leader.hostname == "" ) { + return errors.New("could not determine leader from API status call") + } + + // dump to trace + trace("%s: here is my cluster config:",conn.ID) + trace("%s: leader : %s",conn.ID,rc.leader.String()) + for n, v := range rc.otherPeers { + trace("%s: otherPeer #%d: %s",conn.ID,n,v.String()) + } + + // now make it official + conn.cluster = rc + + return nil +} + diff --git a/vendor/github.com/Xe/gorqlite/conn.go b/vendor/github.com/Xe/gorqlite/conn.go new file mode 100644 index 0000000..bde7819 --- /dev/null +++ b/vendor/github.com/Xe/gorqlite/conn.go @@ -0,0 +1,303 @@ +package gorqlite + +/* + this file contains some high-level Connection-oriented stuff +*/ + +/* ***************************************************************** + + imports + + * *****************************************************************/ + +import "errors" +import "fmt" +import "io" +import "net" +import nurl "net/url" +import "strings" + +var errClosed = errors.New("gorqlite: connection is closed") +var traceOut io.Writer + +// defaults to false. This is used in trace() to quickly +// return if tracing is off, so that we don't do a perhaps +// expensive Sprintf() call only to send it to Discard + +var wantsTrace bool + +/* ***************************************************************** + + type: Connection + + * *****************************************************************/ + +/* + The connection abstraction. Note that since rqlite is stateless, + there really is no "connection". However, this type holds + information such as the current leader, peers, connection + string to build URLs, etc. + + Connections are assigned a "connection ID" which is a pseudo-UUID + for connection identification in trace output only. 
This helps + sort out what's going on if you have multiple connections going + at once. It's generated using a non-standards-or-anything-else-compliant + function that uses crypto/rand to generate 16 random bytes. + + Note that the Connection objection holds info on all peers, gathered + at time of Open() from the node specified. +*/ + +type Connection struct { + + cluster rqliteCluster + + /* + name type default + */ + + username string // username or "" + password string // username or "" + consistencyLevel consistencyLevel // WEAK + wantsHTTPS bool // false unless connection URL is https + + // variables below this line need to be initialized in Open() + + timeout int // 10 + hasBeenClosed bool // false + ID string // generated in init() +} + +/* ***************************************************************** + + method: Connection.Close() + + * *****************************************************************/ + +func (conn *Connection) Close() { + conn.hasBeenClosed = true + trace("%s: %s",conn.ID,"closing connection") +} + +/* ***************************************************************** + + method: Connection.ConsistencyLevel() + + * *****************************************************************/ + +func (conn *Connection) ConsistencyLevel() (string, error) { + if ( conn.hasBeenClosed) { + return "", errClosed + } + return consistencyLevelNames[conn.consistencyLevel], nil +} + +/* ***************************************************************** + + method: Connection.Leader() + + * *****************************************************************/ + +func (conn *Connection) Leader() (string, error) { + if ( conn.hasBeenClosed) { + return "", errClosed + } + trace("%s: Leader(), calling updateClusterInfo()",conn.ID) + err := conn.updateClusterInfo() + if ( err != nil ) { + trace("%s: Leader() got error from updateClusterInfo(): %s",conn.ID,err.Error()) + return "", err + } else { + trace("%s: Leader(), updateClusterInfo() OK",conn.ID) + } + return 
conn.cluster.leader.String(), nil +} + +/* ***************************************************************** + + method: Connection.Peers() + + * *****************************************************************/ + +func (conn *Connection) Peers() ([]string, error) { + if ( conn.hasBeenClosed) { + var ans []string + return ans, errClosed + } + plist := make ([]string,0) + + trace("%s: Peers(), calling updateClusterInfo()",conn.ID) + err := conn.updateClusterInfo() + if ( err != nil ) { + trace("%s: Peers() got error from updateClusterInfo(): %s",conn.ID,err.Error()) + return plist, err + } else { + trace("%s: Peers(), updateClusterInfo() OK",conn.ID) + } + plist = append(plist,conn.cluster.leader.String()) + for _, p := range conn.cluster.otherPeers { + plist = append(plist,p.String()) + } + return plist, nil +} + +/* ***************************************************************** + + method: Connection.SetConsistencyLevel() + + * *****************************************************************/ + +func (conn *Connection) SetConsistencyLevel(levelDesired string) error { + if ( conn.hasBeenClosed) { + return errClosed + } + _, ok := consistencyLevels[levelDesired] + if ( ok ) { + conn.consistencyLevel = consistencyLevels[levelDesired] + return nil + } + return errors.New(fmt.Sprintf("unknown consistency level: %s",levelDesired)) +} + +/* ***************************************************************** + + method: Connection.initConnection() + + * *****************************************************************/ + +/* + initConnection takes the initial connection URL specified by + the user, and parses it into a peer. This peer is assumed to + be the leader. The next thing Open() does is updateClusterInfo() + so the truth will be revealed soon enough. + + initConnection() does not talk to rqlite. It only parses the + connection URL and prepares the new connection for work. 
+ + URL format: + + http[s]://${USER}:${PASSWORD}@${HOSTNAME}:${PORT}/db?[OPTIONS] + + Examples: + + https://mary:secret2@localhost:4001/db + https://mary:secret2@server1.example.com:4001/db?level=none + https://mary:secret2@server2.example.com:4001/db?level=weak + https://mary:secret2@localhost:2265/db?level=strong + + to use default connection to localhost:4001 with no auth: + http:// + https:// + + guaranteed map fields - will be set to "" if not specified + + field name default if not specified + + username "" + password "" + hostname "localhost" + port "4001" + consistencyLevel "weak" +*/ + +func (conn *Connection) initConnection(url string) error { + + // do some sanity checks. You know users. + + if ( len(url) < 7 ) { + return errors.New("url specified is impossibly short") + } + + if ( strings.HasPrefix(url,"http") == false ) { + return errors.New("url does not start with 'http'") + } + + u, err := nurl.Parse(url) + if ( err != nil ) { + return err + } + trace("%s: net.url.Parse() OK",conn.ID) + + if ( u.Scheme == "https" ) { + conn.wantsHTTPS = true + } + + // specs say Username() is always populated even if empty + + if u.User == nil { + conn.username = "" + conn.password = "" + } else { + // guaranteed, but could be empty which is ok + conn.username = u.User.Username() + + // not guaranteed, so test if set + pass, isset := u.User.Password() + if ( isset ) { + conn.password = pass + } else { + conn.password = "" + } + } + + if ( u.Host == "" ) { + conn.cluster.leader.hostname = "localhost" + } else { + conn.cluster.leader.hostname = u.Host + } + + if ( u.Host == "" ) { + conn.cluster.leader.hostname = "localhost" + conn.cluster.leader.port = "4001" + } else { + // SplitHostPort() should only return an error if there is no host port. + // I think. 
+ h, p, err := net.SplitHostPort(u.Host) + if ( err != nil ) { + conn.cluster.leader.hostname = u.Host + } else { + conn.cluster.leader.hostname = h + conn.cluster.leader.port = p + } + } + + /* + + at the moment, the only allowed query is "level=" with + the desired consistency level + + */ + + // default + conn.consistencyLevel = cl_WEAK + + if ( u.RawQuery != "" ) { + if ( u.RawQuery == "level=weak") { + // that's ok but nothing to do + } else if ( u.RawQuery == "level=strong" ) { + conn.consistencyLevel = cl_STRONG + } else if ( u.RawQuery == "level=none" ) { // the fools! + conn.consistencyLevel = cl_NONE + } else { + return errors.New("don't know what to do with this query: " + u.RawQuery) + } + } + + trace("%s: parseDefaultPeer() is done:",conn.ID) + if ( conn.wantsHTTPS == true ) { + trace("%s: %s -> %s",conn.ID,"wants https?","yes") + } else { + trace("%s: %s -> %s",conn.ID,"wants https?","no") + } + trace("%s: %s -> %s",conn.ID,"username",conn.username) + trace("%s: %s -> %s",conn.ID,"password",conn.password) + trace("%s: %s -> %s",conn.ID,"hostname",conn.cluster.leader.hostname) + trace("%s: %s -> %s",conn.ID,"port",conn.cluster.leader.port) + trace("%s: %s -> %s",conn.ID,"consistencyLevel",consistencyLevelNames[conn.consistencyLevel]) + + conn.cluster.conn = conn + + return nil +} + + diff --git a/vendor/github.com/Xe/gorqlite/gorqlite.go b/vendor/github.com/Xe/gorqlite/gorqlite.go new file mode 100644 index 0000000..9752053 --- /dev/null +++ b/vendor/github.com/Xe/gorqlite/gorqlite.go @@ -0,0 +1,187 @@ +/* + gorqlite + A golang database/sql driver for rqlite, the distributed consistent sqlite. + + Copyright (c)2016 andrew fabbro (andrew@fabbro.org) + + See LICENSE.md for license. tl;dr: MIT. Conveniently, the same licese as rqlite. 
+ + Project home page: https://github.com/raindo308/gorqlite + + Learn more about rqlite at: https://github.com/rqlite/rqlite + + */ +package gorqlite + +/* + this file contains package-level stuff: + consts + init() + Open, TraceOn(), TraceOff() +*/ + +import "crypto/rand" +import "fmt" +import "io" +import "io/ioutil" +import "strings" + +/* ***************************************************************** + + const + + * *****************************************************************/ + +type consistencyLevel int +const ( + cl_NONE consistencyLevel = iota + cl_WEAK + cl_STRONG +) +// used in several places, actually +var consistencyLevelNames map[consistencyLevel]string +var consistencyLevels map[string]consistencyLevel + +type apiOperation int +const ( + api_QUERY apiOperation = iota + api_STATUS + api_WRITE +) + +/* ***************************************************************** + + init() + + * *****************************************************************/ + +func init() { + traceOut = ioutil.Discard + + consistencyLevelNames = make(map[consistencyLevel]string) + consistencyLevelNames[cl_NONE] = "none" + consistencyLevelNames[cl_WEAK] = "weak" + consistencyLevelNames[cl_STRONG] = "strong" + + consistencyLevels = make(map[string]consistencyLevel) + consistencyLevels["none"] = cl_NONE + consistencyLevels["weak"] = cl_WEAK + consistencyLevels["strong"] = cl_STRONG + +} + + +/* ***************************************************************** +Open() creates and returns a "connection" to rqlite. + +Since rqlite is stateless, there is no actual connection. Open() creates and initializes a gorqlite Connection type, which represents various config information. 
+ +The URL should be in a form like this: + + http://localhost:4001 + + http:// default, no auth, localhost:4001 + https:// default, no auth, localhost:4001, using https + + http://localhost:1234 + http://mary:secret2@localhost:1234 + + https://mary:secret2@somewhere.example.com:1234 + https://mary:secret2@somewhere.example.com // will use 4001 + * *****************************************************************/ +func Open(connURL string) (Connection, error) { + var conn Connection + + // generate our uuid for trace + b := make([]byte, 16) + _, err := rand.Read(b) + if err != nil { + return conn, err + } + conn.ID = fmt.Sprintf("%X-%X-%X-%X-%X", b[0:4], b[4:6], b[6:8], b[8:10], b[10:]) + trace("%s: Open() called for url: %s",conn.ID,connURL) + + // set defaults + conn.timeout = 10 + conn.hasBeenClosed = false + + // parse the URL given + err = conn.initConnection(connURL) + if ( err != nil ) { + return conn, err + } + + // call updateClusterInfo() to populate the cluster + // also tests the user's default + + err = conn.updateClusterInfo() + + // and the err from updateClusterInfo() will be our err as well + return conn, err +} + +/* ***************************************************************** + + func: trace() + + adds a message to the trace output + + not a public function. we (inside) can add - outside they can + only see. + + Call trace as: Sprintf pattern , args... + + This is done so that the more expensive Sprintf() stuff is + done only if truly needed. When tracing is off, calls to + trace() just hit a bool check and return. If tracing is on, + then the Sprintfing is done at a leisurely pace because, well, + we're tracing. + + Premature optimization is the root of all evil, so this is + probably sinful behavior. 
+ + Don't put a \n in your Sprintf pattern becuase trace() adds one + + * *****************************************************************/ + +func trace(pattern string, args ...interface{}) { + // don't do the probably expensive Sprintf() if not needed + if ( wantsTrace == false ) { + return + } + + // this could all be made into one long statement but we have + // compilers to do such things for us. let's sip a mint julep + // and spell this out in glorious exposition. + + // make sure there is one and only one newline + nlPattern := strings.TrimSpace(pattern) + "\n" + msg := fmt.Sprintf(nlPattern,args...) + traceOut.Write( []byte( msg ) ) +} + +/* + TraceOn() + + Turns on tracing output to the io.Writer of your choice. + + Trace output is very detailed and verbose, as you might expect. + + Normally, you should run with tracing off, as it makes absolutely + no concession to performance and is intended for debugging/dev use. +*/ +func TraceOn(w io.Writer) { + traceOut = w + wantsTrace = true +} + +/* + TraceOff() + + Turns off tracing output. Once you call TraceOff(), no further + info is sent to the io.Writer, unless it is TraceOn'd again. +*/ +func TraceOff() { + wantsTrace = false + traceOut = ioutil.Discard +} diff --git a/vendor/github.com/Xe/gorqlite/prepared_statement.go b/vendor/github.com/Xe/gorqlite/prepared_statement.go new file mode 100644 index 0000000..2ef43ca --- /dev/null +++ b/vendor/github.com/Xe/gorqlite/prepared_statement.go @@ -0,0 +1,54 @@ +package gorqlite + +import ( + "fmt" + "strings" +) + +// EscapeString sql-escapes a string. +func EscapeString(value string) string { + replace := [][2]string{ + {`\`, `\\`}, + {`\0`, `\\0`}, + {`\n`, `\\n`}, + {`\r`, `\\r`}, + {`"`, `\"`}, + {`'`, `\'`}, + } + + for _, val := range replace { + value = strings.Replace(value, val[0], val[1], -1) + } + + return value +} + +// PreparedStatement is a simple wrapper around fmt.Sprintf for prepared SQL +// statements. 
+type PreparedStatement struct { + body string +} + +// NewPreparedStatement takes a sprintf syntax SQL query for later binding of +// parameters. +func NewPreparedStatement(body string) PreparedStatement { + return PreparedStatement{body: body} +} + +// Bind takes arguments and SQL-escapes them, then calling fmt.Sprintf. +func (p PreparedStatement) Bind(args ...interface{}) string { + var spargs []interface{} + + for _, arg := range args { + switch arg.(type) { + case string: + spargs = append(spargs, `'`+EscapeString(arg.(string))+`'`) + case fmt.Stringer: + spargs = append(spargs, `'`+EscapeString(arg.(fmt.Stringer).String())+`'`) + default: + spargs = append(spargs, arg) + } + } + + return fmt.Sprintf(p.body, spargs...) +} diff --git a/vendor/github.com/Xe/gorqlite/query.go b/vendor/github.com/Xe/gorqlite/query.go new file mode 100644 index 0000000..a9a82b6 --- /dev/null +++ b/vendor/github.com/Xe/gorqlite/query.go @@ -0,0 +1,396 @@ +package gorqlite + +import "errors" +import "fmt" +import "encoding/json" + +/* ***************************************************************** + + method: Connection.Query() + + This is the JSON we get back: + +{ + "results": [ + { + "columns": [ + "id", + "name" + ], + "types": [ + "integer", + "text" + ], + "values": [ + [ + 1, + "fiona" + ], + [ + 2, + "sinead" + ] + ], + "time": 0.0150043 + } + ], + "time": 0.0220043 +} + + or + +{ + "results": [ + { + "columns": [ + "id", + "name" + ], + "types": [ + "number", + "text" + ], + "values": [ + [ + null, + "Hulk" + ] + ], + "time": 4.8958e-05 + }, + { + "columns": [ + "id", + "name" + ], + "types": [ + "number", + "text" + ], + "time": 1.8460000000000003e-05 + } + ], + "time": 0.000134776 +} + + or + +{ + "results": [ + { + "error": "near \"nonsense\": syntax error" + } + ], + "time": 2.478862 +} + + * *****************************************************************/ + +/* +QueryOne() is a convenience method that wraps Query() into a single-statement +method. 
+*/ +func (conn *Connection) QueryOne(sqlStatement string) (qr QueryResult, err error) { + if ( conn.hasBeenClosed) { + qr.Err = errClosed + return qr, errClosed + } + sqlStatements := make([]string,0) + sqlStatements = append(sqlStatements,sqlStatement) + qra, err := conn.Query(sqlStatements) + return qra[0], err +} + +/* +Query() is used to perform SELECT operations in the database. + +It takes an array of SQL statements and executes them in a single transaction, returning an array of QueryResult vars. +*/ +func (conn *Connection) Query(sqlStatements []string) (results []QueryResult, err error) { + results = make([]QueryResult,0) + + if ( conn.hasBeenClosed) { + var errResult QueryResult + errResult.Err = errClosed + results = append(results,errResult) + return results, errClosed + } + trace("%s: Query() for %d statements",conn.ID,len(sqlStatements)) + + // if we get an error POSTing, that's a showstopper + response, err := conn.rqliteApiPost(api_QUERY,sqlStatements) + if ( err != nil ) { + trace("%s: rqliteApiCall() ERROR: %s",conn.ID,err.Error()) + var errResult QueryResult + errResult.Err = err + results = append(results,errResult) + return results, err + } + trace("%s: rqliteApiCall() OK",conn.ID) + + // if we get an error Unmarshaling, that's a showstopper + var sections map[string]interface{} + err = json.Unmarshal(response,§ions) + if ( err != nil ) { + trace("%s: json.Unmarshal() ERROR: %s",conn.ID,err.Error()) + var errResult QueryResult + errResult.Err = err + results = append(results,errResult) + return results, err + } + + /* + at this point, we have a "results" section and + a "time" section. we can igore the latter. 
+ */ + + resultsArray := sections["results"].([]interface{}) + trace("%s: I have %d result(s) to parse",conn.ID,len(resultsArray)) + + numStatementErrors := 0 + for n, r := range resultsArray { + trace("%s: parsing result %d",conn.ID,n) + var thisQR QueryResult + thisQR.conn = conn + + // r is a hash with columns, types, values, and time + thisResult := r.(map[string]interface{}) + + // did we get an error? + _, ok := thisResult["error"] + if ok { + trace("%s: have an error on this result: %s",conn.ID,thisResult["error"].(string)) + thisQR.Err = errors.New(thisResult["error"].(string)) + results = append(results,thisQR) + numStatementErrors++ + continue + } + + // time is a float64 + thisQR.Timing = thisResult["time"].(float64) + + // column & type are an array of strings + c := thisResult["columns"].([]interface{}) + t := thisResult["types"].([]interface{}) + for i := 0; i < len(c) ; i++ { + thisQR.columns = append(thisQR.columns,c[i].(string)) + thisQR.types = append(thisQR.types,t[i].(string)) + } + + // and values are an array of arrays + if ( thisResult["values"] != nil ) { + thisQR.values = thisResult["values"].([]interface{}) + } else { + trace("%s: fyi, no values this query",conn.ID) + } + + thisQR.rowNumber = -1 + + trace("%s: this result (#col,time) %d %f",conn.ID,len(thisQR.columns),thisQR.Timing) + results = append(results,thisQR) + } + + trace("%s: finished parsing, returning %d results",conn.ID,len(results)) + + if ( numStatementErrors > 0 ) { + return results, errors.New(fmt.Sprintf("there were %d statement errors",numStatementErrors)) + } else { + return results, nil + } +} + +/* ***************************************************************** + + type: QueryResult + + * *****************************************************************/ + +/* +A QueryResult type holds the results of a call to Query(). You could think of it as a rowset. 
+ +So if you were to query: + + SELECT id, name FROM some_table; + +then a QueryResult would hold any errors from that query, a list of columns and types, and the actual row values. + +Query() returns an array of QueryResult vars, while QueryOne() returns a single variable. +*/ +type QueryResult struct { + conn *Connection + Err error + columns []string + types []string + Timing float64 + values []interface{} + rowNumber int64 +} + +// these are done as getters rather than as public +// variables to prevent monkey business by the user +// that would put us in an inconsistent state + +/* ***************************************************************** + + method: QueryResult.Columns() + + * *****************************************************************/ + +/* +Columns returns a list of the column names for this QueryResult. +*/ +func (qr *QueryResult) Columns() []string { + return qr.columns +} + +/* ***************************************************************** + + method: QueryResult.Map() + + * *****************************************************************/ + +/* +Map() returns the current row (as advanced by Next()) as a map[string]interface{} + +The key is a string corresponding to a column name. +The value is the corresponding column. + +Note that only json values are supported, so you will need to type the interface{} accordingly. 
+*/ +func (qr *QueryResult) Map() (map[string]interface{}, error) { + trace("%s: Map() called for row %d", qr.conn.ID, qr.rowNumber) + ans := make(map[string]interface{}) + + if ( qr.rowNumber == -1 ) { + return ans, errors.New("you need to Next() before you Map(), sorry, it's complicated") + } + + thisRowValues := qr.values[qr.rowNumber].([]interface {}) + for i := 0; i= int64(len(qr.values) - 1 )) { + return false + } + + qr.rowNumber += 1 + return true +} + +/* ***************************************************************** + + method: QueryResult.NumRows() + + * *****************************************************************/ + +/* +NumRows() returns the number of rows returned by the query. +*/ +func (qr *QueryResult) NumRows() int64 { + return int64(len(qr.values)) +} + +/* ***************************************************************** + + method: QueryResult.RowNumber() + + * *****************************************************************/ + +/* +RowNumber() returns the current row number as Next() iterates through the result's rows. +*/ +func (qr *QueryResult) RowNumber() int64 { + return qr.rowNumber +} + +/* ***************************************************************** + + method: QueryResult.Scan() + + * *****************************************************************/ + +/* +Scan() takes a list of pointers and then updates them to reflect he current row's data. + +Note that only the following data types are used, and they +are a subset of the types JSON uses: + string, for JSON strings + float64, for JSON numbers + int64, as a convenient extension + nil for JSON null + +booleans, JSON arrays, and JSON objects are not supported, +since sqlite does not support them. +*/ +func (qr *QueryResult) Scan(dest... 
interface{}) error { + trace("%s: Scan() called for %d vars", qr.conn.ID,len(dest)) + + if ( qr.rowNumber == -1 ) { + return errors.New("you need to Next() before you Scan(), sorry, it's complicated") + } + + if ( len(dest) != len(qr.columns) ) { + return errors.New(fmt.Sprintf("expected %d columns but got %d vars\n", len(qr.columns), len(dest))) + } + + thisRowValues := qr.values[qr.rowNumber].([]interface {}) + for n, d := range dest { + switch d.(type) { + case *int64: + f := int64(thisRowValues[n].(float64)) + *d.(*int64) = f + case *float64: + f := float64(thisRowValues[n].(float64)) + *d.(*float64) = f + case *string: + s := string(thisRowValues[n].(string)) + *d.(*string) = s + default: + return errors.New(fmt.Sprintf("unknown destination type to scan into in variable #%d",n)) + } + } + + return nil +} + +/* ***************************************************************** + + method: QueryResult.Types() + + * *****************************************************************/ + +/* +Types() returns an array of the column's types. + +Note that sqlite will repeat the type you tell it, but in many cases, it's ignored. So you can initialize a column as CHAR(3) but it's really TEXT. See https://www.sqlite.org/datatype3.html + +This info may additionally conflict with the reality that your data is being JSON encoded/decoded. 
+*/ +func (qr *QueryResult) Types() []string { + return qr.types +} + diff --git a/vendor/github.com/Xe/gorqlite/write.go b/vendor/github.com/Xe/gorqlite/write.go new file mode 100644 index 0000000..724a25e --- /dev/null +++ b/vendor/github.com/Xe/gorqlite/write.go @@ -0,0 +1,179 @@ +package gorqlite + +/* + this file has + Write() + WriteResult and its methods +*/ + +import "errors" +import "encoding/json" +import "fmt" + +/* ***************************************************************** + + method: Connection.Write() + + This is the JSON we get back: + +{ + "results": [ + { + "last_insert_id": 1, + "rows_affected": 1, + "time": 0.00759015 + }, + { + "last_insert_id": 2, + "rows_affected": 1, + "time": 0.00669015 + } + ], + "time": 0.869015 +} + + or + +{ + "results": [ + { + "error": "table foo already exists" + } + ], + "time": 0.18472685400000002 +} + + We don't care about the overall time. We just want the results, + so we'll take those and put each into a WriteResult + + Because the results themselves are smaller than the JSON + (which repeats strings like "last_insert_id" frequently), + we'll just parse everything at once. + + * *****************************************************************/ + +/* +WriteOne() is a convenience method that wraps Write() into a single-statement +method. +*/ + +func (conn *Connection) WriteOne(sqlStatement string) (wr WriteResult, err error) { + if ( conn.hasBeenClosed) { + wr.Err = errClosed + return wr, errClosed + } + sqlStatements := make([]string,0) + sqlStatements = append(sqlStatements,sqlStatement) + wra , err := conn.Write(sqlStatements) + return wra[0], err +} + +/* +Write() is used to perform DDL/DML in the database. ALTER, CREATE, DELETE, DROP, INSERT, UPDATE, etc. all go through Write(). + +Write() takes an array of SQL statements, and returns an equal-sized array of WriteResults, each corresponding to the SQL statement that produced it. + +All statements are executed as a single transaction. 
+ +Write() returns an error if one is encountered during its operation. If it's something like a call to the rqlite API, then it'll return that error. If one statement out of several has an error, it will return a generic "there were %d statement errors" and you'll have to look at the individual statement's Err for more info. +*/ +func (conn *Connection) Write(sqlStatements []string) (results []WriteResult, err error) { + results = make([]WriteResult,0) + + if ( conn.hasBeenClosed) { + var errResult WriteResult + errResult.Err = errClosed + results = append(results,errResult) + return results, errClosed + } + + trace("%s: Write() for %d statements",conn.ID,len(sqlStatements)) + + response, err := conn.rqliteApiPost(api_WRITE,sqlStatements) + if ( err != nil ) { + trace("%s: rqliteApiCall() ERROR: %s",conn.ID,err.Error()) + var errResult WriteResult + errResult.Err = err + results = append(results,errResult) + return results, err + } + trace("%s: rqliteApiCall() OK",conn.ID) + + var sections map[string]interface{} + err = json.Unmarshal(response,§ions) + if ( err != nil ) { + trace("%s: json.Unmarshal() ERROR: %s",conn.ID,err.Error()) + var errResult WriteResult + errResult.Err = err + results = append(results,errResult) + return results, err + } + + /* + at this point, we have a "results" section and + a "time" section. we can igore the latter. + */ + + resultsArray := sections["results"].([]interface{}) + trace("%s: I have %d result(s) to parse",conn.ID,len(resultsArray)) + numStatementErrors := 0 + for n, k := range resultsArray { + trace("%s: starting on result %d",conn.ID,n) + thisResult := k.(map[string]interface{}) + + var thisWR WriteResult + thisWR.conn = conn + + // did we get an error? 
+ _, ok := thisResult["error"] + if ok { + trace("%s: have an error on this result: %s",conn.ID,thisResult["error"].(string)) + thisWR.Err = errors.New(thisResult["error"].(string)) + results = append(results,thisWR) + numStatementErrors += 1 + continue + } + + _, ok = thisResult["last_insert_id"] + if ok { + thisWR.LastInsertID = int64(thisResult["last_insert_id"].(float64)) + } + + _, ok = thisResult["rows_affected"] // could be zero for a CREATE + if ok { + thisWR.RowsAffected = int64(thisResult["rows_affected"].(float64)) + } + thisWR.Timing = thisResult["time"].(float64) + + trace("%s: this result (LII,RA,T): %d %d %f",conn.ID,thisWR.LastInsertID,thisWR.RowsAffected,thisWR.Timing) + results = append(results,thisWR) + } + + trace("%s: finished parsing, returning %d results",conn.ID,len(results)) + + if ( numStatementErrors > 0 ) { + return results, errors.New(fmt.Sprintf("there were %d statement errors",numStatementErrors)) + } else { + return results, nil + } +} + +/* ***************************************************************** + + type: WriteResult + + * *****************************************************************/ + +/* +A WriteResult holds the result of a single statement sent to Write(). + +Write() returns an array of WriteResult vars, while WriteOne() returns a single WriteResult. +*/ +type WriteResult struct { + Err error // don't trust the rest if this isn't nil + Timing float64 + RowsAffected int64 // affected by the change + LastInsertID int64 // if relevant, otherwise zero value + conn *Connection +} + diff --git a/vendor/github.com/Xe/uuid/dce.go b/vendor/github.com/Xe/uuid/dce.go new file mode 100644 index 0000000..50a0f2d --- /dev/null +++ b/vendor/github.com/Xe/uuid/dce.go @@ -0,0 +1,84 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package uuid + +import ( + "encoding/binary" + "fmt" + "os" +) + +// A Domain represents a Version 2 domain +type Domain byte + +// Domain constants for DCE Security (Version 2) UUIDs. +const ( + Person = Domain(0) + Group = Domain(1) + Org = Domain(2) +) + +// NewDCESecurity returns a DCE Security (Version 2) UUID. +// +// The domain should be one of Person, Group or Org. +// On a POSIX system the id should be the users UID for the Person +// domain and the users GID for the Group. The meaning of id for +// the domain Org or on non-POSIX systems is site defined. +// +// For a given domain/id pair the same token may be returned for up to +// 7 minutes and 10 seconds. +func NewDCESecurity(domain Domain, id uint32) UUID { + uuid := NewUUID() + if uuid != nil { + uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2 + uuid[9] = byte(domain) + binary.BigEndian.PutUint32(uuid[0:], id) + } + return uuid +} + +// NewDCEPerson returns a DCE Security (Version 2) UUID in the person +// domain with the id returned by os.Getuid. +// +// NewDCEPerson(Person, uint32(os.Getuid())) +func NewDCEPerson() UUID { + return NewDCESecurity(Person, uint32(os.Getuid())) +} + +// NewDCEGroup returns a DCE Security (Version 2) UUID in the group +// domain with the id returned by os.Getgid. +// +// NewDCEGroup(Group, uint32(os.Getgid())) +func NewDCEGroup() UUID { + return NewDCESecurity(Group, uint32(os.Getgid())) +} + +// Domain returns the domain for a Version 2 UUID or false. +func (uuid UUID) Domain() (Domain, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return Domain(uuid[9]), true +} + +// Id returns the id for a Version 2 UUID or false. 
+func (uuid UUID) Id() (uint32, bool) { + if v, _ := uuid.Version(); v != 2 { + return 0, false + } + return binary.BigEndian.Uint32(uuid[0:4]), true +} + +func (d Domain) String() string { + switch d { + case Person: + return "Person" + case Group: + return "Group" + case Org: + return "Org" + } + return fmt.Sprintf("Domain%d", int(d)) +} diff --git a/vendor/github.com/Xe/uuid/doc.go b/vendor/github.com/Xe/uuid/doc.go new file mode 100644 index 0000000..d8bd013 --- /dev/null +++ b/vendor/github.com/Xe/uuid/doc.go @@ -0,0 +1,8 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// The uuid package generates and inspects UUIDs. +// +// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services. +package uuid diff --git a/vendor/github.com/Xe/uuid/hash.go b/vendor/github.com/Xe/uuid/hash.go new file mode 100644 index 0000000..cdd4192 --- /dev/null +++ b/vendor/github.com/Xe/uuid/hash.go @@ -0,0 +1,53 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "crypto/md5" + "crypto/sha1" + "hash" +) + +// Well known Name Space IDs and UUIDs +var ( + NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8") + NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8") + NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8") + NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8") + NIL = Parse("00000000-0000-0000-0000-000000000000") +) + +// NewHash returns a new UUID dervied from the hash of space concatenated with +// data generated by h. The hash should be at least 16 byte in length. The +// first 16 bytes of the hash are used to form the UUID. The version of the +// UUID will be the lower 4 bits of version. NewHash is used to implement +// NewMD5 and NewSHA1. 
+func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID { + h.Reset() + h.Write(space) + h.Write([]byte(data)) + s := h.Sum(nil) + uuid := make([]byte, 16) + copy(uuid, s) + uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4) + uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant + return uuid +} + +// NewMD5 returns a new MD5 (Version 3) UUID based on the +// supplied name space and data. +// +// NewHash(md5.New(), space, data, 3) +func NewMD5(space UUID, data []byte) UUID { + return NewHash(md5.New(), space, data, 3) +} + +// NewSHA1 returns a new SHA1 (Version 5) UUID based on the +// supplied name space and data. +// +// NewHash(sha1.New(), space, data, 5) +func NewSHA1(space UUID, data []byte) UUID { + return NewHash(sha1.New(), space, data, 5) +} diff --git a/vendor/github.com/Xe/uuid/node.go b/vendor/github.com/Xe/uuid/node.go new file mode 100644 index 0000000..dd0a8ac --- /dev/null +++ b/vendor/github.com/Xe/uuid/node.go @@ -0,0 +1,101 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import "net" + +var ( + interfaces []net.Interface // cached list of interfaces + ifname string // name of interface being used + nodeID []byte // hardware for version 1 UUIDs +) + +// NodeInterface returns the name of the interface from which the NodeID was +// derived. The interface "user" is returned if the NodeID was set by +// SetNodeID. +func NodeInterface() string { + return ifname +} + +// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs. +// If name is "" then the first usable interface found will be used or a random +// Node ID will be generated. If a named interface cannot be found then false +// is returned. +// +// SetNodeInterface never fails when name is "". 
+func SetNodeInterface(name string) bool { + if interfaces == nil { + var err error + interfaces, err = net.Interfaces() + if err != nil && name != "" { + return false + } + } + + for _, ifs := range interfaces { + if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) { + if setNodeID(ifs.HardwareAddr) { + ifname = ifs.Name + return true + } + } + } + + // We found no interfaces with a valid hardware address. If name + // does not specify a specific interface generate a random Node ID + // (section 4.1.6) + if name == "" { + if nodeID == nil { + nodeID = make([]byte, 6) + } + randomBits(nodeID) + return true + } + return false +} + +// NodeID returns a slice of a copy of the current Node ID, setting the Node ID +// if not already set. +func NodeID() []byte { + if nodeID == nil { + SetNodeInterface("") + } + nid := make([]byte, 6) + copy(nid, nodeID) + return nid +} + +// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes +// of id are used. If id is less than 6 bytes then false is returned and the +// Node ID is not set. +func SetNodeID(id []byte) bool { + if setNodeID(id) { + ifname = "user" + return true + } + return false +} + +func setNodeID(id []byte) bool { + if len(id) < 6 { + return false + } + if nodeID == nil { + nodeID = make([]byte, 6) + } + copy(nodeID, id) + return true +} + +// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is +// not valid. The NodeID is only well defined for version 1 and 2 UUIDs. +func (uuid UUID) NodeID() []byte { + if len(uuid) != 16 { + return nil + } + node := make([]byte, 6) + copy(node, uuid[10:]) + return node +} diff --git a/vendor/github.com/Xe/uuid/time.go b/vendor/github.com/Xe/uuid/time.go new file mode 100644 index 0000000..b9369c2 --- /dev/null +++ b/vendor/github.com/Xe/uuid/time.go @@ -0,0 +1,132 @@ +// Copyright 2014 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" + "sync" + "time" +) + +// A Time represents a time as the number of 100's of nanoseconds since 15 Oct +// 1582. +type Time int64 + +const ( + lillian = 2299160 // Julian day of 15 Oct 1582 + unix = 2440587 // Julian day of 1 Jan 1970 + epoch = unix - lillian // Days between epochs + g1582 = epoch * 86400 // seconds between epochs + g1582ns100 = g1582 * 10000000 // 100s of a nanoseconds between epochs +) + +var ( + mu sync.Mutex + lasttime uint64 // last time we returned + clock_seq uint16 // clock sequence for this run + + timeNow = time.Now // for testing +) + +// UnixTime converts t the number of seconds and nanoseconds using the Unix +// epoch of 1 Jan 1970. +func (t Time) UnixTime() (sec, nsec int64) { + sec = int64(t - g1582ns100) + nsec = (sec % 10000000) * 100 + sec /= 10000000 + return sec, nsec +} + +// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and +// adjusts the clock sequence as needed. An error is returned if the current +// time cannot be determined. +func GetTime() (Time, error) { + defer mu.Unlock() + mu.Lock() + return getTime() +} + +func getTime() (Time, error) { + t := timeNow() + + // If we don't have a clock sequence already, set one. + if clock_seq == 0 { + setClockSequence(-1) + } + now := uint64(t.UnixNano()/100) + g1582ns100 + + // If time has gone backwards with this clock sequence then we + // increment the clock sequence + if now <= lasttime { + clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000 + } + lasttime = now + return Time(now), nil +} + +// ClockSequence returns the current clock sequence, generating one if not +// already set. The clock sequence is only used for Version 1 UUIDs. +// +// The uuid package does not use global static storage for the clock sequence or +// the last time a UUID was generated. 
Unless SetClockSequence a new random +// clock sequence is generated the first time a clock sequence is requested by +// ClockSequence, GetTime, or NewUUID. (section 4.2.1.1) sequence is generated +// for +func ClockSequence() int { + defer mu.Unlock() + mu.Lock() + return clockSequence() +} + +func clockSequence() int { + if clock_seq == 0 { + setClockSequence(-1) + } + return int(clock_seq & 0x3fff) +} + +// SetClockSeq sets the clock sequence to the lower 14 bits of seq. Setting to +// -1 causes a new sequence to be generated. +func SetClockSequence(seq int) { + defer mu.Unlock() + mu.Lock() + setClockSequence(seq) +} + +func setClockSequence(seq int) { + if seq == -1 { + var b [2]byte + randomBits(b[:]) // clock sequence + seq = int(b[0])<<8 | int(b[1]) + } + old_seq := clock_seq + clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant + if old_seq != clock_seq { + lasttime = 0 + } +} + +// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in +// uuid. It returns false if uuid is not valid. The time is only well defined +// for version 1 and 2 UUIDs. +func (uuid UUID) Time() (Time, bool) { + if len(uuid) != 16 { + return 0, false + } + time := int64(binary.BigEndian.Uint32(uuid[0:4])) + time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 + time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 + return Time(time), true +} + +// ClockSequence returns the clock sequence encoded in uuid. It returns false +// if uuid is not valid. The clock sequence is only well defined for version 1 +// and 2 UUIDs. +func (uuid UUID) ClockSequence() (int, bool) { + if len(uuid) != 16 { + return 0, false + } + return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true +} diff --git a/vendor/github.com/Xe/uuid/util.go b/vendor/github.com/Xe/uuid/util.go new file mode 100644 index 0000000..de40b10 --- /dev/null +++ b/vendor/github.com/Xe/uuid/util.go @@ -0,0 +1,43 @@ +// Copyright 2011 Google Inc. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "io" +) + +// randomBits completely fills slice b with random data. +func randomBits(b []byte) { + if _, err := io.ReadFull(rander, b); err != nil { + panic(err.Error()) // rand should never fail + } +} + +// xvalues returns the value of a byte as a hexadecimal digit or 255. +var xvalues = []byte{ + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, + 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, +} + +// xtob converts the the first two hex bytes of x into a byte. 
+func xtob(x string) (byte, bool) { + b1 := xvalues[x[0]] + b2 := xvalues[x[1]] + return (b1 << 4) | b2, b1 != 255 && b2 != 255 +} diff --git a/vendor/github.com/Xe/uuid/uuid.go b/vendor/github.com/Xe/uuid/uuid.go new file mode 100644 index 0000000..2920fae --- /dev/null +++ b/vendor/github.com/Xe/uuid/uuid.go @@ -0,0 +1,163 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "bytes" + "crypto/rand" + "fmt" + "io" + "strings" +) + +// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC +// 4122. +type UUID []byte + +// A Version represents a UUIDs version. +type Version byte + +// A Variant represents a UUIDs variant. +type Variant byte + +// Constants returned by Variant. +const ( + Invalid = Variant(iota) // Invalid UUID + RFC4122 // The variant specified in RFC4122 + Reserved // Reserved, NCS backward compatibility. + Microsoft // Reserved, Microsoft Corporation backward compatibility. + Future // Reserved for future definition. +) + +var rander = rand.Reader // random function + +// New returns a new random (version 4) UUID as a string. It is a convenience +// function for NewRandom().String(). +func New() string { + return NewRandom().String() +} + +// Parse decodes s into a UUID or returns nil. Both the UUID form of +// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded. 
+func Parse(s string) UUID { + if len(s) == 36+9 { + if strings.ToLower(s[:9]) != "urn:uuid:" { + return nil + } + s = s[9:] + } else if len(s) != 36 { + return nil + } + if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' { + return nil + } + uuid := make([]byte, 16) + for i, x := range []int{ + 0, 2, 4, 6, + 9, 11, + 14, 16, + 19, 21, + 24, 26, 28, 30, 32, 34} { + if v, ok := xtob(s[x:]); !ok { + return nil + } else { + uuid[i] = v + } + } + return uuid +} + +// Equal returns true if uuid1 and uuid2 are equal. +func Equal(uuid1, uuid2 UUID) bool { + return bytes.Equal(uuid1, uuid2) +} + +// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx +// , or "" if uuid is invalid. +func (uuid UUID) String() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// URN returns the RFC 2141 URN form of uuid, +// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid. +func (uuid UUID) URN() string { + if uuid == nil || len(uuid) != 16 { + return "" + } + b := []byte(uuid) + return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x", + b[:4], b[4:6], b[6:8], b[8:10], b[10:]) +} + +// Variant returns the variant encoded in uuid. It returns Invalid if +// uuid is invalid. +func (uuid UUID) Variant() Variant { + if len(uuid) != 16 { + return Invalid + } + switch { + case (uuid[8] & 0xc0) == 0x80: + return RFC4122 + case (uuid[8] & 0xe0) == 0xc0: + return Microsoft + case (uuid[8] & 0xe0) == 0xe0: + return Future + default: + return Reserved + } + panic("unreachable") +} + +// Version returns the verison of uuid. It returns false if uuid is not +// valid. 
+func (uuid UUID) Version() (Version, bool) { + if len(uuid) != 16 { + return 0, false + } + return Version(uuid[6] >> 4), true +} + +func (v Version) String() string { + if v > 15 { + return fmt.Sprintf("BAD_VERSION_%d", v) + } + return fmt.Sprintf("VERSION_%d", v) +} + +func (v Variant) String() string { + switch v { + case RFC4122: + return "RFC4122" + case Reserved: + return "Reserved" + case Microsoft: + return "Microsoft" + case Future: + return "Future" + case Invalid: + return "Invalid" + } + return fmt.Sprintf("BadVariant%d", int(v)) +} + +// SetRand sets the random number generator to r, which implents io.Reader. +// If r.Read returns an error when the package requests random data then +// a panic will be issued. +// +// Calling SetRand with nil sets the random number generator to the default +// generator. +func SetRand(r io.Reader) { + if r == nil { + rander = rand.Reader + return + } + rander = r +} diff --git a/vendor/github.com/Xe/uuid/version1.go b/vendor/github.com/Xe/uuid/version1.go new file mode 100644 index 0000000..6358004 --- /dev/null +++ b/vendor/github.com/Xe/uuid/version1.go @@ -0,0 +1,41 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +import ( + "encoding/binary" +) + +// NewUUID returns a Version 1 UUID based on the current NodeID and clock +// sequence, and the current time. If the NodeID has not been set by SetNodeID +// or SetNodeInterface then it will be set automatically. If the NodeID cannot +// be set NewUUID returns nil. If clock sequence has not been set by +// SetClockSequence then it will be set automatically. If GetTime fails to +// return the current NewUUID returns nil. 
+func NewUUID() UUID { + if nodeID == nil { + SetNodeInterface("") + } + + now, err := GetTime() + if err != nil { + return nil + } + + uuid := make([]byte, 16) + + time_low := uint32(now & 0xffffffff) + time_mid := uint16((now >> 32) & 0xffff) + time_hi := uint16((now >> 48) & 0x0fff) + time_hi |= 0x1000 // Version 1 + + binary.BigEndian.PutUint32(uuid[0:], time_low) + binary.BigEndian.PutUint16(uuid[4:], time_mid) + binary.BigEndian.PutUint16(uuid[6:], time_hi) + binary.BigEndian.PutUint16(uuid[8:], clock_seq) + copy(uuid[10:], nodeID) + + return uuid +} diff --git a/vendor/github.com/Xe/uuid/version4.go b/vendor/github.com/Xe/uuid/version4.go new file mode 100644 index 0000000..b3d4a36 --- /dev/null +++ b/vendor/github.com/Xe/uuid/version4.go @@ -0,0 +1,25 @@ +// Copyright 2011 Google Inc. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package uuid + +// Random returns a Random (Version 4) UUID or panics. +// +// The strength of the UUIDs is based on the strength of the crypto/rand +// package. +// +// A note about uniqueness derived from from the UUID Wikipedia entry: +// +// Randomly generated UUIDs have 122 random bits. One's annual risk of being +// hit by a meteorite is estimated to be one chance in 17 billion, that +// means the probability is about 0.00000000006 (6 × 10−11), +// equivalent to the odds of creating a few tens of trillions of UUIDs in a +// year and having one duplicate. 
+func NewRandom() UUID { + uuid := make([]byte, 16) + randomBits([]byte(uuid)) + uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4 + uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10 + return uuid +} diff --git a/vendor/github.com/caarlos0/env/env.go b/vendor/github.com/caarlos0/env/env.go new file mode 100644 index 0000000..9c5cce3 --- /dev/null +++ b/vendor/github.com/caarlos0/env/env.go @@ -0,0 +1,285 @@ +package env + +import ( + "errors" + "os" + "reflect" + "strconv" + "strings" + "time" +) + +var ( + // ErrNotAStructPtr is returned if you pass something that is not a pointer to a + // Struct to Parse + ErrNotAStructPtr = errors.New("Expected a pointer to a Struct") + // ErrUnsupportedType if the struct field type is not supported by env + ErrUnsupportedType = errors.New("Type is not supported") + // ErrUnsupportedSliceType if the slice element type is not supported by env + ErrUnsupportedSliceType = errors.New("Unsupported slice type") + // Friendly names for reflect types + sliceOfInts = reflect.TypeOf([]int(nil)) + sliceOfInt64s = reflect.TypeOf([]int64(nil)) + sliceOfStrings = reflect.TypeOf([]string(nil)) + sliceOfBools = reflect.TypeOf([]bool(nil)) + sliceOfFloat32s = reflect.TypeOf([]float32(nil)) + sliceOfFloat64s = reflect.TypeOf([]float64(nil)) +) + +// Parse parses a struct containing `env` tags and loads its values from +// environment variables. 
+func Parse(v interface{}) error { + ptrRef := reflect.ValueOf(v) + if ptrRef.Kind() != reflect.Ptr { + return ErrNotAStructPtr + } + ref := ptrRef.Elem() + if ref.Kind() != reflect.Struct { + return ErrNotAStructPtr + } + return doParse(ref) +} + +func doParse(ref reflect.Value) error { + refType := ref.Type() + var errorList []string + + for i := 0; i < refType.NumField(); i++ { + value, err := get(refType.Field(i)) + if err != nil { + errorList = append(errorList, err.Error()) + continue + } + if value == "" { + continue + } + if err := set(ref.Field(i), refType.Field(i), value); err != nil { + errorList = append(errorList, err.Error()) + continue + } + } + if len(errorList) == 0 { + return nil + } + return errors.New(strings.Join(errorList, ". ")) +} + +func get(field reflect.StructField) (string, error) { + var ( + val string + err error + ) + + key, opts := parseKeyForOption(field.Tag.Get("env")) + + defaultValue := field.Tag.Get("envDefault") + val = getOr(key, defaultValue) + + if len(opts) > 0 { + for _, opt := range opts { + // The only option supported is "required". + switch opt { + case "": + break + case "required": + val, err = getRequired(key) + default: + err = errors.New("Env tag option " + opt + " not supported.") + } + } + } + + return val, err +} + +// split the env tag's key into the expected key and desired option, if any. +func parseKeyForOption(key string) (string, []string) { + opts := strings.Split(key, ",") + return opts[0], opts[1:] +} + +func getRequired(key string) (string, error) { + if value, ok := os.LookupEnv(key); ok { + return value, nil + } + // We do not use fmt.Errorf to avoid another import. 
+ return "", errors.New("Required environment variable " + key + " is not set") +} + +func getOr(key, defaultValue string) string { + value, ok := os.LookupEnv(key) + if ok { + return value + } + return defaultValue +} + +func set(field reflect.Value, refType reflect.StructField, value string) error { + switch field.Kind() { + case reflect.Slice: + separator := refType.Tag.Get("envSeparator") + return handleSlice(field, value, separator) + case reflect.String: + field.SetString(value) + case reflect.Bool: + bvalue, err := strconv.ParseBool(value) + if err != nil { + return err + } + field.SetBool(bvalue) + case reflect.Int: + intValue, err := strconv.ParseInt(value, 10, 32) + if err != nil { + return err + } + field.SetInt(intValue) + case reflect.Uint: + uintValue, err := strconv.ParseUint(value, 10, 32) + if err != nil { + return err + } + field.SetUint(uintValue) + case reflect.Float32: + v, err := strconv.ParseFloat(value, 32) + if err != nil { + return err + } + field.SetFloat(v) + case reflect.Float64: + v, err := strconv.ParseFloat(value, 64) + if err != nil { + return err + } + field.Set(reflect.ValueOf(v)) + case reflect.Int64: + if refType.Type.String() == "time.Duration" { + dValue, err := time.ParseDuration(value) + if err != nil { + return err + } + field.Set(reflect.ValueOf(dValue)) + } else { + intValue, err := strconv.ParseInt(value, 10, 64) + if err != nil { + return err + } + field.SetInt(intValue) + } + default: + return ErrUnsupportedType + } + return nil +} + +func handleSlice(field reflect.Value, value, separator string) error { + if separator == "" { + separator = "," + } + + splitData := strings.Split(value, separator) + + switch field.Type() { + case sliceOfStrings: + field.Set(reflect.ValueOf(splitData)) + case sliceOfInts: + intData, err := parseInts(splitData) + if err != nil { + return err + } + field.Set(reflect.ValueOf(intData)) + case sliceOfInt64s: + int64Data, err := parseInt64s(splitData) + if err != nil { + return err + } + 
field.Set(reflect.ValueOf(int64Data)) + + case sliceOfFloat32s: + data, err := parseFloat32s(splitData) + if err != nil { + return err + } + field.Set(reflect.ValueOf(data)) + case sliceOfFloat64s: + data, err := parseFloat64s(splitData) + if err != nil { + return err + } + field.Set(reflect.ValueOf(data)) + case sliceOfBools: + boolData, err := parseBools(splitData) + if err != nil { + return err + } + field.Set(reflect.ValueOf(boolData)) + default: + return ErrUnsupportedSliceType + } + return nil +} + +func parseInts(data []string) ([]int, error) { + var intSlice []int + + for _, v := range data { + intValue, err := strconv.ParseInt(v, 10, 32) + if err != nil { + return nil, err + } + intSlice = append(intSlice, int(intValue)) + } + return intSlice, nil +} + +func parseInt64s(data []string) ([]int64, error) { + var intSlice []int64 + + for _, v := range data { + intValue, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, err + } + intSlice = append(intSlice, int64(intValue)) + } + return intSlice, nil +} + +func parseFloat32s(data []string) ([]float32, error) { + var float32Slice []float32 + + for _, v := range data { + data, err := strconv.ParseFloat(v, 32) + if err != nil { + return nil, err + } + float32Slice = append(float32Slice, float32(data)) + } + return float32Slice, nil +} + +func parseFloat64s(data []string) ([]float64, error) { + var float64Slice []float64 + + for _, v := range data { + data, err := strconv.ParseFloat(v, 64) + if err != nil { + return nil, err + } + float64Slice = append(float64Slice, float64(data)) + } + return float64Slice, nil +} + +func parseBools(data []string) ([]bool, error) { + var boolSlice []bool + + for _, v := range data { + bvalue, err := strconv.ParseBool(v) + if err != nil { + return nil, err + } + + boolSlice = append(boolSlice, bvalue) + } + return boolSlice, nil +} diff --git a/vendor/github.com/nats-io/go-nats/context.go b/vendor/github.com/nats-io/go-nats/context.go new file mode 100644 index 
0000000..be6ada4 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/context.go @@ -0,0 +1,166 @@ +// Copyright 2012-2017 Apcera Inc. All rights reserved. + +// +build go1.7 + +// A Go client for the NATS messaging system (https://nats.io). +package nats + +import ( + "context" + "fmt" + "reflect" +) + +// RequestWithContext takes a context, a subject and payload +// in bytes and request expecting a single response. +func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { + if ctx == nil { + return nil, ErrInvalidContext + } + if nc == nil { + return nil, ErrInvalidConnection + } + + nc.mu.Lock() + // If user wants the old style. + if nc.Opts.UseOldRequestStyle { + nc.mu.Unlock() + return nc.oldRequestWithContext(ctx, subj, data) + } + + // Do setup for the new style. + if nc.respMap == nil { + // _INBOX wildcard + nc.respSub = fmt.Sprintf("%s.*", NewInbox()) + nc.respMap = make(map[string]chan *Msg) + } + // Create literal Inbox and map to a chan msg. + mch := make(chan *Msg, RequestChanLen) + respInbox := nc.newRespInbox() + token := respToken(respInbox) + nc.respMap[token] = mch + createSub := nc.respMux == nil + ginbox := nc.respSub + nc.mu.Unlock() + + if createSub { + // Make sure scoped subscription is setup only once. + var err error + nc.respSetup.Do(func() { err = nc.createRespMux(ginbox) }) + if err != nil { + return nil, err + } + } + + err := nc.PublishRequest(subj, respInbox, data) + if err != nil { + return nil, err + } + + var ok bool + var msg *Msg + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + case <-ctx.Done(): + nc.mu.Lock() + delete(nc.respMap, token) + nc.mu.Unlock() + return nil, ctx.Err() + } + + return msg, nil +} + +// oldRequestWithContext utilizes inbox and subscription per request. 
+func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) { + inbox := NewInbox() + ch := make(chan *Msg, RequestChanLen) + + s, err := nc.subscribe(inbox, _EMPTY_, nil, ch) + if err != nil { + return nil, err + } + s.AutoUnsubscribe(1) + defer s.Unsubscribe() + + err = nc.PublishRequest(subj, inbox, data) + if err != nil { + return nil, err + } + + return s.NextMsgWithContext(ctx) +} + +// NextMsgWithContext takes a context and returns the next message +// available to a synchronous subscriber, blocking until it is delivered +// or context gets canceled. +func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) { + if ctx == nil { + return nil, ErrInvalidContext + } + if s == nil { + return nil, ErrBadSubscription + } + + s.mu.Lock() + err := s.validateNextMsgState() + if err != nil { + s.mu.Unlock() + return nil, err + } + + // snapshot + mch := s.mch + s.mu.Unlock() + + var ok bool + var msg *Msg + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + err := s.processNextMsgDelivered(msg) + if err != nil { + return nil, err + } + case <-ctx.Done(): + return nil, ctx.Err() + } + + return msg, nil +} + +// RequestWithContext will create an Inbox and perform a Request +// using the provided cancellation context with the Inbox reply +// for the data v. A response will be decoded into the vPtrResponse. 
+func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v interface{}, vPtr interface{}) error { + if ctx == nil { + return ErrInvalidContext + } + + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + m, err := c.Conn.RequestWithContext(ctx, subject, b) + if err != nil { + return err + } + if reflect.TypeOf(vPtr) == emptyMsgType { + mPtr := vPtr.(*Msg) + *mPtr = *m + } else { + err := c.Enc.Decode(m.Subject, m.Data, vPtr) + if err != nil { + return err + } + } + + return nil +} diff --git a/vendor/github.com/nats-io/go-nats/enc.go b/vendor/github.com/nats-io/go-nats/enc.go new file mode 100644 index 0000000..291b782 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/enc.go @@ -0,0 +1,249 @@ +// Copyright 2012-2015 Apcera Inc. All rights reserved. + +package nats + +import ( + "errors" + "fmt" + "reflect" + "sync" + "time" + + // Default Encoders + . "github.com/nats-io/go-nats/encoders/builtin" +) + +// Encoder interface is for all register encoders +type Encoder interface { + Encode(subject string, v interface{}) ([]byte, error) + Decode(subject string, data []byte, vPtr interface{}) error +} + +var encMap map[string]Encoder +var encLock sync.Mutex + +// Indexe names into the Registered Encoders. +const ( + JSON_ENCODER = "json" + GOB_ENCODER = "gob" + DEFAULT_ENCODER = "default" +) + +func init() { + encMap = make(map[string]Encoder) + // Register json, gob and default encoder + RegisterEncoder(JSON_ENCODER, &JsonEncoder{}) + RegisterEncoder(GOB_ENCODER, &GobEncoder{}) + RegisterEncoder(DEFAULT_ENCODER, &DefaultEncoder{}) +} + +// EncodedConn are the preferred way to interface with NATS. They wrap a bare connection to +// a nats server and have an extendable encoder system that will encode and decode messages +// from raw Go types. +type EncodedConn struct { + Conn *Conn + Enc Encoder +} + +// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered +// encoder. 
+func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) { + if c == nil { + return nil, errors.New("nats: Nil Connection") + } + if c.IsClosed() { + return nil, ErrConnectionClosed + } + ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)} + if ec.Enc == nil { + return nil, fmt.Errorf("No encoder registered for '%s'", encType) + } + return ec, nil +} + +// RegisterEncoder will register the encType with the given Encoder. Useful for customization. +func RegisterEncoder(encType string, enc Encoder) { + encLock.Lock() + defer encLock.Unlock() + encMap[encType] = enc +} + +// EncoderForType will return the registered Encoder for the encType. +func EncoderForType(encType string) Encoder { + encLock.Lock() + defer encLock.Unlock() + return encMap[encType] +} + +// Publish publishes the data argument to the given subject. The data argument +// will be encoded using the associated encoder. +func (c *EncodedConn) Publish(subject string, v interface{}) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, _EMPTY_, b) +} + +// PublishRequest will perform a Publish() expecting a response on the +// reply subject. Use Request() for automatically waiting for a response +// inline. +func (c *EncodedConn) PublishRequest(subject, reply string, v interface{}) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + return c.Conn.publish(subject, reply, b) +} + +// Request will create an Inbox and perform a Request() call +// with the Inbox reply for the data v. A response will be +// decoded into the vPtrResponse. 
+func (c *EncodedConn) Request(subject string, v interface{}, vPtr interface{}, timeout time.Duration) error { + b, err := c.Enc.Encode(subject, v) + if err != nil { + return err + } + m, err := c.Conn.Request(subject, b, timeout) + if err != nil { + return err + } + if reflect.TypeOf(vPtr) == emptyMsgType { + mPtr := vPtr.(*Msg) + *mPtr = *m + } else { + err = c.Enc.Decode(m.Subject, m.Data, vPtr) + } + return err +} + +// Handler is a specific callback used for Subscribe. It is generalized to +// an interface{}, but we will discover its format and arguments at runtime +// and perform the correct callback, including de-marshaling JSON strings +// back into the appropriate struct based on the signature of the Handler. +// +// Handlers are expected to have one of four signatures. +// +// type person struct { +// Name string `json:"name,omitempty"` +// Age uint `json:"age,omitempty"` +// } +// +// handler := func(m *Msg) +// handler := func(p *person) +// handler := func(subject string, o *obj) +// handler := func(subject, reply string, o *obj) +// +// These forms allow a callback to request a raw Msg ptr, where the processing +// of the message from the wire is untouched. Process a JSON representation +// and demarshal it into the given struct, e.g. person. +// There are also variants where the callback wants either the subject, or the +// subject and the reply subject. +type Handler interface{} + +// Dissect the cb Handler's signature +func argInfo(cb Handler) (reflect.Type, int) { + cbType := reflect.TypeOf(cb) + if cbType.Kind() != reflect.Func { + panic("nats: Handler needs to be a func") + } + numArgs := cbType.NumIn() + if numArgs == 0 { + return nil, numArgs + } + return cbType.In(numArgs - 1), numArgs +} + +var emptyMsgType = reflect.TypeOf(&Msg{}) + +// Subscribe will create a subscription on the given subject and process incoming +// messages using the specified Handler. 
The Handler should be a func that matches +// a signature from the description of Handler from above. +func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, _EMPTY_, cb) +} + +// QueueSubscribe will create a queue subscription on the given subject and process +// incoming messages using the specified Handler. The Handler should be a func that +// matches a signature from the description of Handler from above. +func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) { + return c.subscribe(subject, queue, cb) +} + +// Internal implementation that all public functions will use. +func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) { + if cb == nil { + return nil, errors.New("nats: Handler required for EncodedConn Subscription") + } + argType, numArgs := argInfo(cb) + if argType == nil { + return nil, errors.New("nats: Handler requires at least one argument") + } + + cbValue := reflect.ValueOf(cb) + wantsRaw := (argType == emptyMsgType) + + natsCB := func(m *Msg) { + var oV []reflect.Value + if wantsRaw { + oV = []reflect.Value{reflect.ValueOf(m)} + } else { + var oPtr reflect.Value + if argType.Kind() != reflect.Ptr { + oPtr = reflect.New(argType) + } else { + oPtr = reflect.New(argType.Elem()) + } + if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { + if c.Conn.Opts.AsyncErrorCB != nil { + c.Conn.ach <- func() { + c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error())) + } + } + return + } + if argType.Kind() != reflect.Ptr { + oPtr = reflect.Indirect(oPtr) + } + + // Callback Arity + switch numArgs { + case 1: + oV = []reflect.Value{oPtr} + case 2: + subV := reflect.ValueOf(m.Subject) + oV = []reflect.Value{subV, oPtr} + case 3: + subV := reflect.ValueOf(m.Subject) + replyV := reflect.ValueOf(m.Reply) + oV = []reflect.Value{subV, replyV, oPtr} + } + + } + 
cbValue.Call(oV) + } + + return c.Conn.subscribe(subject, queue, natsCB, nil) +} + +// FlushTimeout allows a Flush operation to have an associated timeout. +func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) { + return c.Conn.FlushTimeout(timeout) +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (c *EncodedConn) Flush() error { + return c.Conn.Flush() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush(), etc. +func (c *EncodedConn) Close() { + c.Conn.Close() +} + +// LastError reports the last error encountered via the Connection. +func (c *EncodedConn) LastError() error { + return c.Conn.err +} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go new file mode 100644 index 0000000..82467ce --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/encoders/builtin/default_enc.go @@ -0,0 +1,106 @@ +// Copyright 2012-2015 Apcera Inc. All rights reserved. + +package builtin + +import ( + "bytes" + "fmt" + "reflect" + "strconv" + "unsafe" +) + +// DefaultEncoder implementation for EncodedConn. +// This encoder will leave []byte and string untouched, but will attempt to +// turn numbers into appropriate strings that can be decoded. It will also +// properly encode and decode bools. It will encode a struct, but if you want +// to properly handle structures you should use JsonEncoder. 
+type DefaultEncoder struct { + // Empty +} + +var trueB = []byte("true") +var falseB = []byte("false") +var nilB = []byte("") + +// Encode +func (je *DefaultEncoder) Encode(subject string, v interface{}) ([]byte, error) { + switch arg := v.(type) { + case string: + bytes := *(*[]byte)(unsafe.Pointer(&arg)) + return bytes, nil + case []byte: + return arg, nil + case bool: + if arg { + return trueB, nil + } else { + return falseB, nil + } + case nil: + return nilB, nil + default: + var buf bytes.Buffer + fmt.Fprintf(&buf, "%+v", arg) + return buf.Bytes(), nil + } +} + +// Decode +func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr interface{}) error { + // Figure out what it's pointing to... + sData := *(*string)(unsafe.Pointer(&data)) + switch arg := vPtr.(type) { + case *string: + *arg = sData + return nil + case *[]byte: + *arg = data + return nil + case *int: + n, err := strconv.ParseInt(sData, 10, 64) + if err != nil { + return err + } + *arg = int(n) + return nil + case *int32: + n, err := strconv.ParseInt(sData, 10, 64) + if err != nil { + return err + } + *arg = int32(n) + return nil + case *int64: + n, err := strconv.ParseInt(sData, 10, 64) + if err != nil { + return err + } + *arg = int64(n) + return nil + case *float32: + n, err := strconv.ParseFloat(sData, 32) + if err != nil { + return err + } + *arg = float32(n) + return nil + case *float64: + n, err := strconv.ParseFloat(sData, 64) + if err != nil { + return err + } + *arg = float64(n) + return nil + case *bool: + b, err := strconv.ParseBool(sData) + if err != nil { + return err + } + *arg = b + return nil + default: + vt := reflect.TypeOf(arg).Elem() + return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt) + } +} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go new file mode 100644 index 0000000..988ff42 --- /dev/null +++ 
b/vendor/github.com/nats-io/go-nats/encoders/builtin/gob_enc.go @@ -0,0 +1,34 @@ +// Copyright 2013-2015 Apcera Inc. All rights reserved. + +package builtin + +import ( + "bytes" + "encoding/gob" +) + +// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn. +// This encoder will use the builtin encoding/gob to Marshal +// and Unmarshal most types, including structs. +type GobEncoder struct { + // Empty +} + +// FIXME(dlc) - This could probably be more efficient. + +// Encode +func (ge *GobEncoder) Encode(subject string, v interface{}) ([]byte, error) { + b := new(bytes.Buffer) + enc := gob.NewEncoder(b) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Decode +func (ge *GobEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) { + dec := gob.NewDecoder(bytes.NewBuffer(data)) + err = dec.Decode(vPtr) + return +} diff --git a/vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go b/vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go new file mode 100644 index 0000000..3b269ef --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/encoders/builtin/json_enc.go @@ -0,0 +1,45 @@ +// Copyright 2012-2015 Apcera Inc. All rights reserved. + +package builtin + +import ( + "encoding/json" + "strings" +) + +// JsonEncoder is a JSON Encoder implementation for EncodedConn. +// This encoder will use the builtin encoding/json to Marshal +// and Unmarshal most types, including structs. 
+type JsonEncoder struct { + // Empty +} + +// Encode +func (je *JsonEncoder) Encode(subject string, v interface{}) ([]byte, error) { + b, err := json.Marshal(v) + if err != nil { + return nil, err + } + return b, nil +} + +// Decode +func (je *JsonEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) { + switch arg := vPtr.(type) { + case *string: + // If they want a string and it is a JSON string, strip quotes + // This allows someone to send a struct but receive as a plain string + // This cast should be efficient for Go 1.3 and beyond. + str := string(data) + if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) { + *arg = str[1 : len(str)-1] + } else { + *arg = str + } + case *[]byte: + *arg = data + default: + err = json.Unmarshal(data, arg) + } + return +} diff --git a/vendor/github.com/nats-io/go-nats/nats.go b/vendor/github.com/nats-io/go-nats/nats.go new file mode 100644 index 0000000..65eb95c --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/nats.go @@ -0,0 +1,2975 @@ +// Copyright 2012-2017 Apcera Inc. All rights reserved. + +// A Go client for the NATS messaging system (https://nats.io). +package nats + +import ( + "bufio" + "bytes" + "crypto/tls" + "crypto/x509" + "encoding/json" + "errors" + "fmt" + "io/ioutil" + "math/rand" + "net" + "net/url" + "regexp" + "runtime" + "strconv" + "strings" + "sync" + "time" + + "github.com/nats-io/go-nats/util" + "github.com/nats-io/nuid" +) + +// Default Constants +const ( + Version = "1.3.1" + DefaultURL = "nats://localhost:4222" + DefaultPort = 4222 + DefaultMaxReconnect = 60 + DefaultReconnectWait = 2 * time.Second + DefaultTimeout = 2 * time.Second + DefaultPingInterval = 2 * time.Minute + DefaultMaxPingOut = 2 + DefaultMaxChanLen = 8192 // 8k + DefaultReconnectBufSize = 8 * 1024 * 1024 // 8MB + RequestChanLen = 8 + LangString = "go" +) + +// STALE_CONNECTION is for detection and proper handling of stale connections. 
+const STALE_CONNECTION = "stale connection" + +// PERMISSIONS_ERR is for when nats server subject authorization has failed. +const PERMISSIONS_ERR = "permissions violation" + +// AUTHORIZATION_ERR is for when nats server user authorization has failed. +const AUTHORIZATION_ERR = "authorization violation" + +// Errors +var ( + ErrConnectionClosed = errors.New("nats: connection closed") + ErrSecureConnRequired = errors.New("nats: secure connection required") + ErrSecureConnWanted = errors.New("nats: secure connection not available") + ErrBadSubscription = errors.New("nats: invalid subscription") + ErrTypeSubscription = errors.New("nats: invalid subscription type") + ErrBadSubject = errors.New("nats: invalid subject") + ErrSlowConsumer = errors.New("nats: slow consumer, messages dropped") + ErrTimeout = errors.New("nats: timeout") + ErrBadTimeout = errors.New("nats: timeout invalid") + ErrAuthorization = errors.New("nats: authorization violation") + ErrNoServers = errors.New("nats: no servers available for connection") + ErrJsonParse = errors.New("nats: connect message, json parse error") + ErrChanArg = errors.New("nats: argument needs to be a channel type") + ErrMaxPayload = errors.New("nats: maximum payload exceeded") + ErrMaxMessages = errors.New("nats: maximum messages delivered") + ErrSyncSubRequired = errors.New("nats: illegal call on an async subscription") + ErrMultipleTLSConfigs = errors.New("nats: multiple tls.Configs not allowed") + ErrNoInfoReceived = errors.New("nats: protocol exception, INFO not received") + ErrReconnectBufExceeded = errors.New("nats: outbound buffer limit exceeded") + ErrInvalidConnection = errors.New("nats: invalid connection") + ErrInvalidMsg = errors.New("nats: invalid message or message nil") + ErrInvalidArg = errors.New("nats: invalid argument") + ErrInvalidContext = errors.New("nats: invalid context") + ErrStaleConnection = errors.New("nats: " + STALE_CONNECTION) +) + +// GetDefaultOptions returns default configuration options for 
the client. +func GetDefaultOptions() Options { + return Options{ + AllowReconnect: true, + MaxReconnect: DefaultMaxReconnect, + ReconnectWait: DefaultReconnectWait, + Timeout: DefaultTimeout, + PingInterval: DefaultPingInterval, + MaxPingsOut: DefaultMaxPingOut, + SubChanLen: DefaultMaxChanLen, + ReconnectBufSize: DefaultReconnectBufSize, + } +} + +// DEPRECATED: Use GetDefaultOptions() instead. +// DefaultOptions is not safe for use by multiple clients. +// For details see #308. +var DefaultOptions = GetDefaultOptions() + +// Status represents the state of the connection. +type Status int + +const ( + DISCONNECTED = Status(iota) + CONNECTED + CLOSED + RECONNECTING + CONNECTING +) + +// ConnHandler is used for asynchronous events such as +// disconnected and closed connections. +type ConnHandler func(*Conn) + +// ErrHandler is used to process asynchronous errors encountered +// while processing inbound messages. +type ErrHandler func(*Conn, *Subscription, error) + +// asyncCB is used to preserve order for async callbacks. +type asyncCB func() + +// Option is a function on the options for a connection. +type Option func(*Options) error + +// Options can be used to create a customized connection. +type Options struct { + + // Url represents a single NATS server url to which the client + // will be connecting. If the Servers option is also set, it + // then becomes the first server in the Servers array. + Url string + + // Servers is a configured set of servers which this client + // will use when attempting to connect. + Servers []string + + // NoRandomize configures whether we will randomize the + // server pool. + NoRandomize bool + + // Name is an optional name label which will be sent to the server + // on CONNECT to identify the client. + Name string + + // Verbose signals the server to send an OK ack for commands + // successfully processed by the server. 
+ Verbose bool + + // Pedantic signals the server whether it should be doing further + // validation of subjects. + Pedantic bool + + // Secure enables TLS secure connections that skip server + // verification by default. NOT RECOMMENDED. + Secure bool + + // TLSConfig is a custom TLS configuration to use for secure + // transports. + TLSConfig *tls.Config + + // AllowReconnect enables reconnection logic to be used when we + // encounter a disconnect from the current server. + AllowReconnect bool + + // MaxReconnect sets the number of reconnect attempts that will be + // tried before giving up. If negative, then it will never give up + // trying to reconnect. + MaxReconnect int + + // ReconnectWait sets the time to backoff after attempting a reconnect + // to a server that we were already connected to previously. + ReconnectWait time.Duration + + // Timeout sets the timeout for a Dial operation on a connection. + Timeout time.Duration + + // FlusherTimeout is the maximum time to wait for the flusher loop + // to be able to finish writing to the underlying connection. + FlusherTimeout time.Duration + + // PingInterval is the period at which the client will be sending ping + // commands to the server, disabled if 0 or negative. + PingInterval time.Duration + + // MaxPingsOut is the maximum number of pending ping commands that can + // be awaiting a response before raising an ErrStaleConnection error. + MaxPingsOut int + + // ClosedCB sets the closed handler that is called when a client will + // no longer be connected. + ClosedCB ConnHandler + + // DisconnectedCB sets the disconnected handler that is called + // whenever the connection is disconnected. + DisconnectedCB ConnHandler + + // ReconnectedCB sets the reconnected handler called whenever + // the connection is successfully reconnected. + ReconnectedCB ConnHandler + + // DiscoveredServersCB sets the callback that is invoked whenever a new + // server has joined the cluster. 
+ DiscoveredServersCB ConnHandler + + // AsyncErrorCB sets the async error handler (e.g. slow consumer errors) + AsyncErrorCB ErrHandler + + // ReconnectBufSize is the size of the backing bufio during reconnect. + // Once this has been exhausted publish operations will return an error. + ReconnectBufSize int + + // SubChanLen is the size of the buffered channel used between the socket + // Go routine and the message delivery for SyncSubscriptions. + // NOTE: This does not affect AsyncSubscriptions which are + // dictated by PendingLimits() + SubChanLen int + + // User sets the username to be used when connecting to the server. + User string + + // Password sets the password to be used when connecting to a server. + Password string + + // Token sets the token to be used when connecting to a server. + Token string + + // Dialer allows a custom Dialer when forming connections. + Dialer *net.Dialer + + // UseOldRequestStyle forces the old method of Requests that utilize + // a new Inbox and a new Subscription for each request. + UseOldRequestStyle bool +} + +const ( + // Scratch storage for assembling protocol headers + scratchSize = 512 + + // The size of the bufio reader/writer on top of the socket. + defaultBufSize = 32768 + + // The buffered size of the flush "kick" channel + flushChanSize = 1024 + + // Default server pool size + srvPoolSize = 4 + + // Channel size for the async callback handler. + asyncCBChanSize = 32 + + // NUID size + nuidSize = 22 +) + +// A Conn represents a bare connection to a nats-server. +// It can send and receive []byte payloads. +type Conn struct { + // Keep all members for which we use atomic at the beginning of the + // struct and make sure they are all 64bits (or use padding if necessary). + // atomic.* functions crash on 32bit machines if operand is not aligned + // at 64bit. 
See https://github.com/golang/go/issues/599 + Statistics + mu sync.Mutex + Opts Options + wg *sync.WaitGroup + url *url.URL + conn net.Conn + srvPool []*srv + urls map[string]struct{} // Keep track of all known URLs (used by processInfo) + bw *bufio.Writer + pending *bytes.Buffer + fch chan struct{} + info serverInfo + ssid int64 + subsMu sync.RWMutex + subs map[int64]*Subscription + ach chan asyncCB + pongs []chan struct{} + scratch [scratchSize]byte + status Status + initc bool // true if the connection is performing the initial connect + err error + ps *parseState + ptmr *time.Timer + pout int + + // New style response handler + respSub string // The wildcard subject + respMux *Subscription // A single response subscription + respMap map[string]chan *Msg // Request map for the response msg channels + respSetup sync.Once // Ensures response subscription occurs once +} + +// A Subscription represents interest in a given subject. +type Subscription struct { + mu sync.Mutex + sid int64 + + // Subject that represents this subscription. This can be different + // than the received subject inside a Msg if this is a wildcard. + Subject string + + // Optional queue group name. If present, all subscriptions with the + // same name will form a distributed queue, and each message will + // only be processed by one member of the group. + Queue string + + delivered uint64 + max uint64 + conn *Conn + mcb MsgHandler + mch chan *Msg + closed bool + sc bool + connClosed bool + + // Type of Subscription + typ SubscriptionType + + // Async linked list + pHead *Msg + pTail *Msg + pCond *sync.Cond + + // Pending stats, async subscriptions, high-speed etc. + pMsgs int + pBytes int + pMsgsMax int + pBytesMax int + pMsgsLimit int + pBytesLimit int + dropped int +} + +// Msg is a structure used by Subscribers and PublishMsg(). 
+type Msg struct { + Subject string + Reply string + Data []byte + Sub *Subscription + next *Msg +} + +// Tracks various stats received and sent on this connection, +// including counts for messages and bytes. +type Statistics struct { + InMsgs uint64 + OutMsgs uint64 + InBytes uint64 + OutBytes uint64 + Reconnects uint64 +} + +// Tracks individual backend servers. +type srv struct { + url *url.URL + didConnect bool + reconnects int + lastAttempt time.Time + isImplicit bool +} + +type serverInfo struct { + Id string `json:"server_id"` + Host string `json:"host"` + Port uint `json:"port"` + Version string `json:"version"` + AuthRequired bool `json:"auth_required"` + TLSRequired bool `json:"tls_required"` + MaxPayload int64 `json:"max_payload"` + ConnectURLs []string `json:"connect_urls,omitempty"` +} + +const ( + // clientProtoZero is the original client protocol from 2009. + // http://nats.io/documentation/internals/nats-protocol/ + /* clientProtoZero */ _ = iota + // clientProtoInfo signals a client can receive more then the original INFO block. + // This can be used to update clients on other cluster members, etc. + clientProtoInfo +) + +type connectInfo struct { + Verbose bool `json:"verbose"` + Pedantic bool `json:"pedantic"` + User string `json:"user,omitempty"` + Pass string `json:"pass,omitempty"` + Token string `json:"auth_token,omitempty"` + TLS bool `json:"tls_required"` + Name string `json:"name"` + Lang string `json:"lang"` + Version string `json:"version"` + Protocol int `json:"protocol"` +} + +// MsgHandler is a callback function that processes messages delivered to +// asynchronous subscribers. +type MsgHandler func(msg *Msg) + +// Connect will attempt to connect to the NATS system. +// The url can contain username/password semantics. e.g. nats://derek:pass@localhost:4222 +// Comma separated arrays are also supported, e.g. urlA, urlB. +// Options start with the defaults but can be overridden. 
+func Connect(url string, options ...Option) (*Conn, error) { + opts := GetDefaultOptions() + opts.Servers = processUrlString(url) + for _, opt := range options { + if err := opt(&opts); err != nil { + return nil, err + } + } + return opts.Connect() +} + +// Options that can be passed to Connect. + +// Name is an Option to set the client name. +func Name(name string) Option { + return func(o *Options) error { + o.Name = name + return nil + } +} + +// Secure is an Option to enable TLS secure connections that skip server verification by default. +// Pass a TLS Configuration for proper TLS. +func Secure(tls ...*tls.Config) Option { + return func(o *Options) error { + o.Secure = true + // Use of variadic just simplifies testing scenarios. We only take the first one. + // fixme(DLC) - Could panic if more than one. Could also do TLS option. + if len(tls) > 1 { + return ErrMultipleTLSConfigs + } + if len(tls) == 1 { + o.TLSConfig = tls[0] + } + return nil + } +} + +// RootCAs is a helper option to provide the RootCAs pool from a list of filenames. If Secure is +// not already set this will set it as well. +func RootCAs(file ...string) Option { + return func(o *Options) error { + pool := x509.NewCertPool() + for _, f := range file { + rootPEM, err := ioutil.ReadFile(f) + if err != nil || rootPEM == nil { + return fmt.Errorf("nats: error loading or parsing rootCA file: %v", err) + } + ok := pool.AppendCertsFromPEM([]byte(rootPEM)) + if !ok { + return fmt.Errorf("nats: failed to parse root certificate from %q", f) + } + } + if o.TLSConfig == nil { + o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + o.TLSConfig.RootCAs = pool + o.Secure = true + return nil + } +} + +// ClientCert is a helper option to provide the client certificate from a file. 
If Secure is +// not already set this will set it as well +func ClientCert(certFile, keyFile string) Option { + return func(o *Options) error { + cert, err := tls.LoadX509KeyPair(certFile, keyFile) + if err != nil { + return fmt.Errorf("nats: error loading client certificate: %v", err) + } + cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0]) + if err != nil { + return fmt.Errorf("nats: error parsing client certificate: %v", err) + } + if o.TLSConfig == nil { + o.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + o.TLSConfig.Certificates = []tls.Certificate{cert} + o.Secure = true + return nil + } +} + +// NoReconnect is an Option to turn off reconnect behavior. +func NoReconnect() Option { + return func(o *Options) error { + o.AllowReconnect = false + return nil + } +} + +// DontRandomize is an Option to turn off randomizing the server pool. +func DontRandomize() Option { + return func(o *Options) error { + o.NoRandomize = true + return nil + } +} + +// ReconnectWait is an Option to set the wait time between reconnect attempts. +func ReconnectWait(t time.Duration) Option { + return func(o *Options) error { + o.ReconnectWait = t + return nil + } +} + +// MaxReconnects is an Option to set the maximum number of reconnect attempts. +func MaxReconnects(max int) Option { + return func(o *Options) error { + o.MaxReconnect = max + return nil + } +} + +// Timeout is an Option to set the timeout for Dial on a connection. +func Timeout(t time.Duration) Option { + return func(o *Options) error { + o.Timeout = t + return nil + } +} + +// DisconnectHandler is an Option to set the disconnected handler. +func DisconnectHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.DisconnectedCB = cb + return nil + } +} + +// ReconnectHandler is an Option to set the reconnected handler. 
+func ReconnectHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.ReconnectedCB = cb + return nil + } +} + +// ClosedHandler is an Option to set the closed handler. +func ClosedHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.ClosedCB = cb + return nil + } +} + +// DiscoveredServersHandler is an Option to set the new servers handler. +func DiscoveredServersHandler(cb ConnHandler) Option { + return func(o *Options) error { + o.DiscoveredServersCB = cb + return nil + } +} + +// ErrHandler is an Option to set the async error handler. +func ErrorHandler(cb ErrHandler) Option { + return func(o *Options) error { + o.AsyncErrorCB = cb + return nil + } +} + +// UserInfo is an Option to set the username and password to +// use when not included directly in the URLs. +func UserInfo(user, password string) Option { + return func(o *Options) error { + o.User = user + o.Password = password + return nil + } +} + +// Token is an Option to set the token to use when not included +// directly in the URLs. +func Token(token string) Option { + return func(o *Options) error { + o.Token = token + return nil + } +} + +// Dialer is an Option to set the dialer which will be used when +// attempting to establish a connection. +func Dialer(dialer *net.Dialer) Option { + return func(o *Options) error { + o.Dialer = dialer + return nil + } +} + +// UseOldRequestyStyle is an Option to force usage of the old Request style. +func UseOldRequestStyle() Option { + return func(o *Options) error { + o.UseOldRequestStyle = true + return nil + } +} + +// Handler processing + +// SetDisconnectHandler will set the disconnect event handler. +func (nc *Conn) SetDisconnectHandler(dcb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.DisconnectedCB = dcb +} + +// SetReconnectHandler will set the reconnect event handler. 
+func (nc *Conn) SetReconnectHandler(rcb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.ReconnectedCB = rcb +} + +// SetDiscoveredServersHandler will set the discovered servers handler. +func (nc *Conn) SetDiscoveredServersHandler(dscb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.DiscoveredServersCB = dscb +} + +// SetClosedHandler will set the reconnect event handler. +func (nc *Conn) SetClosedHandler(cb ConnHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.ClosedCB = cb +} + +// SetErrHandler will set the async error handler. +func (nc *Conn) SetErrorHandler(cb ErrHandler) { + if nc == nil { + return + } + nc.mu.Lock() + defer nc.mu.Unlock() + nc.Opts.AsyncErrorCB = cb +} + +// Process the url string argument to Connect. Return an array of +// urls, even if only one. +func processUrlString(url string) []string { + urls := strings.Split(url, ",") + for i, s := range urls { + urls[i] = strings.TrimSpace(s) + } + return urls +} + +// Connect will attempt to connect to a NATS server with multiple options. +func (o Options) Connect() (*Conn, error) { + nc := &Conn{Opts: o} + + // Some default options processing. + if nc.Opts.MaxPingsOut == 0 { + nc.Opts.MaxPingsOut = DefaultMaxPingOut + } + // Allow old default for channel length to work correctly. + if nc.Opts.SubChanLen == 0 { + nc.Opts.SubChanLen = DefaultMaxChanLen + } + // Default ReconnectBufSize + if nc.Opts.ReconnectBufSize == 0 { + nc.Opts.ReconnectBufSize = DefaultReconnectBufSize + } + // Ensure that Timeout is not 0 + if nc.Opts.Timeout == 0 { + nc.Opts.Timeout = DefaultTimeout + } + + // Allow custom Dialer for connecting using DialTimeout by default + if nc.Opts.Dialer == nil { + nc.Opts.Dialer = &net.Dialer{ + Timeout: nc.Opts.Timeout, + } + } + + if err := nc.setupServerPool(); err != nil { + return nil, err + } + + // Create the async callback channel. 
+ nc.ach = make(chan asyncCB, asyncCBChanSize) + + if err := nc.connect(); err != nil { + return nil, err + } + + // Spin up the async cb dispatcher on success + go nc.asyncDispatch() + + return nc, nil +} + +const ( + _CRLF_ = "\r\n" + _EMPTY_ = "" + _SPC_ = " " + _PUB_P_ = "PUB " +) + +const ( + _OK_OP_ = "+OK" + _ERR_OP_ = "-ERR" + _PONG_OP_ = "PONG" + _INFO_OP_ = "INFO" +) + +const ( + conProto = "CONNECT %s" + _CRLF_ + pingProto = "PING" + _CRLF_ + pongProto = "PONG" + _CRLF_ + subProto = "SUB %s %s %d" + _CRLF_ + unsubProto = "UNSUB %d %s" + _CRLF_ + okProto = _OK_OP_ + _CRLF_ +) + +// Return the currently selected server +func (nc *Conn) currentServer() (int, *srv) { + for i, s := range nc.srvPool { + if s == nil { + continue + } + if s.url == nc.url { + return i, s + } + } + return -1, nil +} + +// Pop the current server and put onto the end of the list. Select head of list as long +// as number of reconnect attempts under MaxReconnect. +func (nc *Conn) selectNextServer() (*srv, error) { + i, s := nc.currentServer() + if i < 0 { + return nil, ErrNoServers + } + sp := nc.srvPool + num := len(sp) + copy(sp[i:num-1], sp[i+1:num]) + maxReconnect := nc.Opts.MaxReconnect + if maxReconnect < 0 || s.reconnects < maxReconnect { + nc.srvPool[num-1] = s + } else { + nc.srvPool = sp[0 : num-1] + } + if len(nc.srvPool) <= 0 { + nc.url = nil + return nil, ErrNoServers + } + nc.url = nc.srvPool[0].url + return nc.srvPool[0], nil +} + +// Will assign the correct server to the nc.Url +func (nc *Conn) pickServer() error { + nc.url = nil + if len(nc.srvPool) <= 0 { + return ErrNoServers + } + for _, s := range nc.srvPool { + if s != nil { + nc.url = s.url + return nil + } + } + return ErrNoServers +} + +const tlsScheme = "tls" + +// Create the server pool using the options given. +// We will place a Url option first, followed by any +// Server Options. We will randomize the server pool unlesss +// the NoRandomize flag is set. 
+func (nc *Conn) setupServerPool() error { + nc.srvPool = make([]*srv, 0, srvPoolSize) + nc.urls = make(map[string]struct{}, srvPoolSize) + + // Create srv objects from each url string in nc.Opts.Servers + // and add them to the pool + for _, urlString := range nc.Opts.Servers { + if err := nc.addURLToPool(urlString, false); err != nil { + return err + } + } + + // Randomize if allowed to + if !nc.Opts.NoRandomize { + nc.shufflePool() + } + + // Normally, if this one is set, Options.Servers should not be, + // but we always allowed that, so continue to do so. + if nc.Opts.Url != _EMPTY_ { + // Add to the end of the array + if err := nc.addURLToPool(nc.Opts.Url, false); err != nil { + return err + } + // Then swap it with first to guarantee that Options.Url is tried first. + last := len(nc.srvPool) - 1 + if last > 0 { + nc.srvPool[0], nc.srvPool[last] = nc.srvPool[last], nc.srvPool[0] + } + } else if len(nc.srvPool) <= 0 { + // Place default URL if pool is empty. + if err := nc.addURLToPool(DefaultURL, false); err != nil { + return err + } + } + + // Check for Scheme hint to move to TLS mode. + for _, srv := range nc.srvPool { + if srv.url.Scheme == tlsScheme { + // FIXME(dlc), this is for all in the pool, should be case by case. 
+ nc.Opts.Secure = true + if nc.Opts.TLSConfig == nil { + nc.Opts.TLSConfig = &tls.Config{MinVersion: tls.VersionTLS12} + } + } + } + + return nc.pickServer() +} + +// addURLToPool adds an entry to the server pool +func (nc *Conn) addURLToPool(sURL string, implicit bool) error { + u, err := url.Parse(sURL) + if err != nil { + return err + } + s := &srv{url: u, isImplicit: implicit} + nc.srvPool = append(nc.srvPool, s) + nc.urls[u.Host] = struct{}{} + return nil +} + +// shufflePool swaps randomly elements in the server pool +func (nc *Conn) shufflePool() { + if len(nc.srvPool) <= 1 { + return + } + source := rand.NewSource(time.Now().UnixNano()) + r := rand.New(source) + for i := range nc.srvPool { + j := r.Intn(i + 1) + nc.srvPool[i], nc.srvPool[j] = nc.srvPool[j], nc.srvPool[i] + } +} + +// createConn will connect to the server and wrap the appropriate +// bufio structures. It will do the right thing when an existing +// connection is in place. +func (nc *Conn) createConn() (err error) { + if nc.Opts.Timeout < 0 { + return ErrBadTimeout + } + if _, cur := nc.currentServer(); cur == nil { + return ErrNoServers + } else { + cur.lastAttempt = time.Now() + } + + dialer := nc.Opts.Dialer + nc.conn, err = dialer.Dial("tcp", nc.url.Host) + if err != nil { + return err + } + + // No clue why, but this stalls and kills performance on Mac (Mavericks). + // https://code.google.com/p/go/issues/detail?id=6930 + //if ip, ok := nc.conn.(*net.TCPConn); ok { + // ip.SetReadBuffer(defaultBufSize) + //} + + if nc.pending != nil && nc.bw != nil { + // Move to pending buffer. + nc.bw.Flush() + } + nc.bw = bufio.NewWriterSize(nc.conn, defaultBufSize) + return nil +} + +// makeTLSConn will wrap an existing Conn using TLS +func (nc *Conn) makeTLSConn() { + // Allow the user to configure their own tls.Config structure, otherwise + // default to InsecureSkipVerify. + // TODO(dlc) - We should make the more secure version the default. 
+ if nc.Opts.TLSConfig != nil { + tlsCopy := util.CloneTLSConfig(nc.Opts.TLSConfig) + // If its blank we will override it with the current host + if tlsCopy.ServerName == _EMPTY_ { + h, _, _ := net.SplitHostPort(nc.url.Host) + tlsCopy.ServerName = h + } + nc.conn = tls.Client(nc.conn, tlsCopy) + } else { + nc.conn = tls.Client(nc.conn, &tls.Config{InsecureSkipVerify: true}) + } + conn := nc.conn.(*tls.Conn) + conn.Handshake() + nc.bw = bufio.NewWriterSize(nc.conn, defaultBufSize) +} + +// waitForExits will wait for all socket watcher Go routines to +// be shutdown before proceeding. +func (nc *Conn) waitForExits(wg *sync.WaitGroup) { + // Kick old flusher forcefully. + select { + case nc.fch <- struct{}{}: + default: + } + + // Wait for any previous go routines. + if wg != nil { + wg.Wait() + } +} + +// spinUpGoRoutines will launch the Go routines responsible for +// reading and writing to the socket. This will be launched via a +// go routine itself to release any locks that may be held. +// We also use a WaitGroup to make sure we only start them on a +// reconnect when the previous ones have exited. +func (nc *Conn) spinUpGoRoutines() { + // Make sure everything has exited. + nc.waitForExits(nc.wg) + + // Create a new waitGroup instance for this run. + nc.wg = &sync.WaitGroup{} + // We will wait on both. + nc.wg.Add(2) + + // Spin up the readLoop and the socket flusher. 
+ go nc.readLoop(nc.wg) + go nc.flusher(nc.wg) + + nc.mu.Lock() + if nc.Opts.PingInterval > 0 { + if nc.ptmr == nil { + nc.ptmr = time.AfterFunc(nc.Opts.PingInterval, nc.processPingTimer) + } else { + nc.ptmr.Reset(nc.Opts.PingInterval) + } + } + nc.mu.Unlock() +} + +// Report the connected server's Url +func (nc *Conn) ConnectedUrl() string { + if nc == nil { + return _EMPTY_ + } + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.url.String() +} + +// Report the connected server's Id +func (nc *Conn) ConnectedServerId() string { + if nc == nil { + return _EMPTY_ + } + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.status != CONNECTED { + return _EMPTY_ + } + return nc.info.Id +} + +// Low level setup for structs, etc +func (nc *Conn) setup() { + nc.subs = make(map[int64]*Subscription) + nc.pongs = make([]chan struct{}, 0, 8) + + nc.fch = make(chan struct{}, flushChanSize) + + // Setup scratch outbound buffer for PUB + pub := nc.scratch[:len(_PUB_P_)] + copy(pub, _PUB_P_) +} + +// Process a connected connection and initialize properly. +func (nc *Conn) processConnectInit() error { + + // Set out deadline for the whole connect process + nc.conn.SetDeadline(time.Now().Add(nc.Opts.Timeout)) + defer nc.conn.SetDeadline(time.Time{}) + + // Set our status to connecting. + nc.status = CONNECTING + + // Process the INFO protocol received from the server + err := nc.processExpectedInfo() + if err != nil { + return err + } + + // Send the CONNECT protocol along with the initial PING protocol. + // Wait for the PONG response (or any error that we get from the server). + err = nc.sendConnect() + if err != nil { + return err + } + + // Reset the number of PING sent out + nc.pout = 0 + + go nc.spinUpGoRoutines() + + return nil +} + +// Main connect function. 
Will connect to the nats-server +func (nc *Conn) connect() error { + var returnedErr error + + // Create actual socket connection + // For first connect we walk all servers in the pool and try + // to connect immediately. + nc.mu.Lock() + nc.initc = true + // The pool may change inside theloop iteration due to INFO protocol. + for i := 0; i < len(nc.srvPool); i++ { + nc.url = nc.srvPool[i].url + + if err := nc.createConn(); err == nil { + // This was moved out of processConnectInit() because + // that function is now invoked from doReconnect() too. + nc.setup() + + err = nc.processConnectInit() + + if err == nil { + nc.srvPool[i].didConnect = true + nc.srvPool[i].reconnects = 0 + returnedErr = nil + break + } else { + returnedErr = err + nc.mu.Unlock() + nc.close(DISCONNECTED, false) + nc.mu.Lock() + nc.url = nil + } + } else { + // Cancel out default connection refused, will trigger the + // No servers error conditional + if matched, _ := regexp.Match(`connection refused`, []byte(err.Error())); matched { + returnedErr = nil + } + } + } + nc.initc = false + defer nc.mu.Unlock() + + if returnedErr == nil && nc.status != CONNECTED { + returnedErr = ErrNoServers + } + return returnedErr +} + +// This will check to see if the connection should be +// secure. This can be dictated from either end and should +// only be called after the INIT protocol has been received. +func (nc *Conn) checkForSecure() error { + // Check to see if we need to engage TLS + o := nc.Opts + + // Check for mismatch in setups + if o.Secure && !nc.info.TLSRequired { + return ErrSecureConnWanted + } else if nc.info.TLSRequired && !o.Secure { + return ErrSecureConnRequired + } + + // Need to rewrap with bufio + if o.Secure { + nc.makeTLSConn() + } + return nil +} + +// processExpectedInfo will look for the expected first INFO message +// sent when a connection is established. The lock should be held entering. 
+func (nc *Conn) processExpectedInfo() error { + + c := &control{} + + // Read the protocol + err := nc.readOp(c) + if err != nil { + return err + } + + // The nats protocol should send INFO first always. + if c.op != _INFO_OP_ { + return ErrNoInfoReceived + } + + // Parse the protocol + if err := nc.processInfo(c.args); err != nil { + return err + } + + return nc.checkForSecure() +} + +// Sends a protocol control message by queuing into the bufio writer +// and kicking the flush Go routine. These writes are protected. +func (nc *Conn) sendProto(proto string) { + nc.mu.Lock() + nc.bw.WriteString(proto) + nc.kickFlusher() + nc.mu.Unlock() +} + +// Generate a connect protocol message, issuing user/password if +// applicable. The lock is assumed to be held upon entering. +func (nc *Conn) connectProto() (string, error) { + o := nc.Opts + var user, pass, token string + u := nc.url.User + if u != nil { + // if no password, assume username is authToken + if _, ok := u.Password(); !ok { + token = u.Username() + } else { + user = u.Username() + pass, _ = u.Password() + } + } else { + // Take from options (pssibly all empty strings) + user = nc.Opts.User + pass = nc.Opts.Password + token = nc.Opts.Token + } + cinfo := connectInfo{o.Verbose, o.Pedantic, + user, pass, token, + o.Secure, o.Name, LangString, Version, clientProtoInfo} + b, err := json.Marshal(cinfo) + if err != nil { + return _EMPTY_, ErrJsonParse + } + return fmt.Sprintf(conProto, b), nil +} + +// normalizeErr removes the prefix -ERR, trim spaces and remove the quotes. +func normalizeErr(line string) string { + s := strings.ToLower(strings.TrimSpace(strings.TrimPrefix(line, _ERR_OP_))) + s = strings.TrimLeft(strings.TrimRight(s, "'"), "'") + return s +} + +// Send a connect protocol message to the server, issue user/password if +// applicable. Will wait for a flush to return from the server for error +// processing. 
+func (nc *Conn) sendConnect() error { + + // Construct the CONNECT protocol string + cProto, err := nc.connectProto() + if err != nil { + return err + } + + // Write the protocol into the buffer + _, err = nc.bw.WriteString(cProto) + if err != nil { + return err + } + + // Add to the buffer the PING protocol + _, err = nc.bw.WriteString(pingProto) + if err != nil { + return err + } + + // Flush the buffer + err = nc.bw.Flush() + if err != nil { + return err + } + + // Now read the response from the server. + br := bufio.NewReaderSize(nc.conn, defaultBufSize) + line, err := br.ReadString('\n') + if err != nil { + return err + } + + // If opts.Verbose is set, handle +OK + if nc.Opts.Verbose && line == okProto { + // Read the rest now... + line, err = br.ReadString('\n') + if err != nil { + return err + } + } + + // We expect a PONG + if line != pongProto { + // But it could be something else, like -ERR + + // Since we no longer use ReadLine(), trim the trailing "\r\n" + line = strings.TrimRight(line, "\r\n") + + // If it's a server error... + if strings.HasPrefix(line, _ERR_OP_) { + // Remove -ERR, trim spaces and quotes, and convert to lower case. + line = normalizeErr(line) + return errors.New("nats: " + line) + } + + // Notify that we got an unexpected protocol. + return fmt.Errorf("nats: expected '%s', got '%s'", _PONG_OP_, line) + } + + // This is where we are truly connected. + nc.status = CONNECTED + + return nil +} + +// A control protocol line. +type control struct { + op, args string +} + +// Read a control line and process the intended op. +func (nc *Conn) readOp(c *control) error { + br := bufio.NewReaderSize(nc.conn, defaultBufSize) + line, err := br.ReadString('\n') + if err != nil { + return err + } + parseControl(line, c) + return nil +} + +// Parse a control line from the server. 
+func parseControl(line string, c *control) { + toks := strings.SplitN(line, _SPC_, 2) + if len(toks) == 1 { + c.op = strings.TrimSpace(toks[0]) + c.args = _EMPTY_ + } else if len(toks) == 2 { + c.op, c.args = strings.TrimSpace(toks[0]), strings.TrimSpace(toks[1]) + } else { + c.op = _EMPTY_ + } +} + +// flushReconnectPending will push the pending items that were +// gathered while we were in a RECONNECTING state to the socket. +func (nc *Conn) flushReconnectPendingItems() { + if nc.pending == nil { + return + } + if nc.pending.Len() > 0 { + nc.bw.Write(nc.pending.Bytes()) + } +} + +// Try to reconnect using the option parameters. +// This function assumes we are allowed to reconnect. +func (nc *Conn) doReconnect() { + // We want to make sure we have the other watchers shutdown properly + // here before we proceed past this point. + nc.mu.Lock() + wg := nc.wg + nc.mu.Unlock() + nc.waitForExits(wg) + + // FIXME(dlc) - We have an issue here if we have + // outstanding flush points (pongs) and they were not + // sent out, but are still in the pipe. + + // Hold the lock manually and release where needed below, + // can't do defer here. + nc.mu.Lock() + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + // Clear any errors. + nc.err = nil + + // Perform appropriate callback if needed for a disconnect. + if nc.Opts.DisconnectedCB != nil { + nc.ach <- func() { nc.Opts.DisconnectedCB(nc) } + } + + for len(nc.srvPool) > 0 { + cur, err := nc.selectNextServer() + if err != nil { + nc.err = err + break + } + + sleepTime := int64(0) + + // Sleep appropriate amount of time before the + // connection attempt if connecting to same server + // we just got disconnected from.. + if time.Since(cur.lastAttempt) < nc.Opts.ReconnectWait { + sleepTime = int64(nc.Opts.ReconnectWait - time.Since(cur.lastAttempt)) + } + + // On Windows, createConn() will take more than a second when no + // server is running at that address. 
So it could be that the + // time elapsed between reconnect attempts is always > than + // the set option. Release the lock to give a chance to a parallel + // nc.Close() to break the loop. + nc.mu.Unlock() + if sleepTime <= 0 { + runtime.Gosched() + } else { + time.Sleep(time.Duration(sleepTime)) + } + nc.mu.Lock() + + // Check if we have been closed first. + if nc.isClosed() { + break + } + + // Mark that we tried a reconnect + cur.reconnects++ + + // Try to create a new connection + err = nc.createConn() + + // Not yet connected, retry... + // Continue to hold the lock + if err != nil { + nc.err = nil + continue + } + + // We are reconnected + nc.Reconnects++ + + // Process connect logic + if nc.err = nc.processConnectInit(); nc.err != nil { + nc.status = RECONNECTING + continue + } + + // Clear out server stats for the server we connected to.. + cur.didConnect = true + cur.reconnects = 0 + + // Send existing subscription state + nc.resendSubscriptions() + + // Now send off and clear pending buffer + nc.flushReconnectPendingItems() + + // Flush the buffer + nc.err = nc.bw.Flush() + if nc.err != nil { + nc.status = RECONNECTING + continue + } + + // Done with the pending buffer + nc.pending = nil + + // This is where we are truly connected. + nc.status = CONNECTED + + // Queue up the reconnect callback. + if nc.Opts.ReconnectedCB != nil { + nc.ach <- func() { nc.Opts.ReconnectedCB(nc) } + } + + // Release lock here, we will return below. + nc.mu.Unlock() + + // Make sure to flush everything + nc.Flush() + + return + } + + // Call into close.. We have no servers left.. + if nc.err == nil { + nc.err = ErrNoServers + } + nc.mu.Unlock() + nc.Close() +} + +// processOpErr handles errors from reading or parsing the protocol. +// The lock should not be held entering this function. 
+func (nc *Conn) processOpErr(err error) { + nc.mu.Lock() + if nc.isConnecting() || nc.isClosed() || nc.isReconnecting() { + nc.mu.Unlock() + return + } + + if nc.Opts.AllowReconnect && nc.status == CONNECTED { + // Set our new status + nc.status = RECONNECTING + if nc.ptmr != nil { + nc.ptmr.Stop() + } + if nc.conn != nil { + nc.bw.Flush() + nc.conn.Close() + nc.conn = nil + } + + // Create a new pending buffer to underpin the bufio Writer while + // we are reconnecting. + nc.pending = &bytes.Buffer{} + nc.bw = bufio.NewWriterSize(nc.pending, nc.Opts.ReconnectBufSize) + + go nc.doReconnect() + nc.mu.Unlock() + return + } + + nc.status = DISCONNECTED + nc.err = err + nc.mu.Unlock() + nc.Close() +} + +// Marker to close the channel to kick out the Go routine. +func (nc *Conn) closeAsyncFunc() asyncCB { + return func() { + nc.mu.Lock() + if nc.ach != nil { + close(nc.ach) + nc.ach = nil + } + nc.mu.Unlock() + } +} + +// asyncDispatch is responsible for calling any async callbacks +func (nc *Conn) asyncDispatch() { + // snapshot since they can change from underneath of us. + nc.mu.Lock() + ach := nc.ach + nc.mu.Unlock() + + // Loop on the channel and process async callbacks. + for { + if f, ok := <-ach; !ok { + return + } else { + f() + } + } +} + +// readLoop() will sit on the socket reading and processing the +// protocol from the server. It will dispatch appropriately based +// on the op type. +func (nc *Conn) readLoop(wg *sync.WaitGroup) { + // Release the wait group on exit + defer wg.Done() + + // Create a parseState if needed. + nc.mu.Lock() + if nc.ps == nil { + nc.ps = &parseState{} + } + nc.mu.Unlock() + + // Stack based buffer. + b := make([]byte, defaultBufSize) + + for { + // FIXME(dlc): RWLock here? 
+ nc.mu.Lock() + sb := nc.isClosed() || nc.isReconnecting() + if sb { + nc.ps = &parseState{} + } + conn := nc.conn + nc.mu.Unlock() + + if sb || conn == nil { + break + } + + n, err := conn.Read(b) + if err != nil { + nc.processOpErr(err) + break + } + + if err := nc.parse(b[:n]); err != nil { + nc.processOpErr(err) + break + } + } + // Clear the parseState here.. + nc.mu.Lock() + nc.ps = nil + nc.mu.Unlock() +} + +// waitForMsgs waits on the conditional shared with readLoop and processMsg. +// It is used to deliver messages to asynchronous subscribers. +func (nc *Conn) waitForMsgs(s *Subscription) { + var closed bool + var delivered, max uint64 + + for { + s.mu.Lock() + if s.pHead == nil && !s.closed { + s.pCond.Wait() + } + // Pop the msg off the list + m := s.pHead + if m != nil { + s.pHead = m.next + if s.pHead == nil { + s.pTail = nil + } + s.pMsgs-- + s.pBytes -= len(m.Data) + } + mcb := s.mcb + max = s.max + closed = s.closed + if !s.closed { + s.delivered++ + delivered = s.delivered + } + s.mu.Unlock() + + if closed { + break + } + + // Deliver the message. + if m != nil && (max == 0 || delivered <= max) { + mcb(m) + } + // If we have hit the max for delivered msgs, remove sub. + if max > 0 && delivered >= max { + nc.mu.Lock() + nc.removeSub(s) + nc.mu.Unlock() + break + } + } +} + +// processMsg is called by parse and will place the msg on the +// appropriate channel/pending queue for processing. If the channel is full, +// or the pending queue is over the pending limits, the connection is +// considered a slow consumer. +func (nc *Conn) processMsg(data []byte) { + // Don't lock the connection to avoid server cutting us off if the + // flusher is holding the connection lock, trying to send to the server + // that is itself trying to send data to us. 
+ nc.subsMu.RLock() + + // Stats + nc.InMsgs++ + nc.InBytes += uint64(len(data)) + + sub := nc.subs[nc.ps.ma.sid] + if sub == nil { + nc.subsMu.RUnlock() + return + } + + // Copy them into string + subj := string(nc.ps.ma.subject) + reply := string(nc.ps.ma.reply) + + // Doing message create outside of the sub's lock to reduce contention. + // It's possible that we end-up not using the message, but that's ok. + + // FIXME(dlc): Need to copy, should/can do COW? + msgPayload := make([]byte, len(data)) + copy(msgPayload, data) + + // FIXME(dlc): Should we recycle these containers? + m := &Msg{Data: msgPayload, Subject: subj, Reply: reply, Sub: sub} + + sub.mu.Lock() + + // Subscription internal stats (applicable only for non ChanSubscription's) + if sub.typ != ChanSubscription { + sub.pMsgs++ + if sub.pMsgs > sub.pMsgsMax { + sub.pMsgsMax = sub.pMsgs + } + sub.pBytes += len(m.Data) + if sub.pBytes > sub.pBytesMax { + sub.pBytesMax = sub.pBytes + } + + // Check for a Slow Consumer + if (sub.pMsgsLimit > 0 && sub.pMsgs > sub.pMsgsLimit) || + (sub.pBytesLimit > 0 && sub.pBytes > sub.pBytesLimit) { + goto slowConsumer + } + } + + // We have two modes of delivery. One is the channel, used by channel + // subscribers and syncSubscribers, the other is a linked list for async. + if sub.mch != nil { + select { + case sub.mch <- m: + default: + goto slowConsumer + } + } else { + // Push onto the async pList + if sub.pHead == nil { + sub.pHead = m + sub.pTail = m + sub.pCond.Signal() + } else { + sub.pTail.next = m + sub.pTail = m + } + } + + // Clear SlowConsumer status. 
+ sub.sc = false + + sub.mu.Unlock() + nc.subsMu.RUnlock() + return + +slowConsumer: + sub.dropped++ + sc := !sub.sc + sub.sc = true + // Undo stats from above + if sub.typ != ChanSubscription { + sub.pMsgs-- + sub.pBytes -= len(m.Data) + } + sub.mu.Unlock() + nc.subsMu.RUnlock() + if sc { + // Now we need connection's lock and we may end-up in the situation + // that we were trying to avoid, except that in this case, the client + // is already experiencing client-side slow consumer situation. + nc.mu.Lock() + nc.err = ErrSlowConsumer + if nc.Opts.AsyncErrorCB != nil { + nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, sub, ErrSlowConsumer) } + } + nc.mu.Unlock() + } +} + +// processPermissionsViolation is called when the server signals a subject +// permissions violation on either publish or subscribe. +func (nc *Conn) processPermissionsViolation(err string) { + nc.mu.Lock() + nc.err = errors.New("nats: " + err) + if nc.Opts.AsyncErrorCB != nil { + nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, nil, nc.err) } + } + nc.mu.Unlock() +} + +// processAuthorizationViolation is called when the server signals a user +// authorization violation. +func (nc *Conn) processAuthorizationViolation(err string) { + nc.mu.Lock() + nc.err = ErrAuthorization + if nc.Opts.AsyncErrorCB != nil { + nc.ach <- func() { nc.Opts.AsyncErrorCB(nc, nil, ErrAuthorization) } + } + nc.mu.Unlock() +} + +// flusher is a separate Go routine that will process flush requests for the write +// bufio. This allows coalescing of writes to the underlying socket. +func (nc *Conn) flusher(wg *sync.WaitGroup) { + // Release the wait group + defer wg.Done() + + // snapshot the bw and conn since they can change from underneath of us. + nc.mu.Lock() + bw := nc.bw + conn := nc.conn + fch := nc.fch + flusherTimeout := nc.Opts.FlusherTimeout + nc.mu.Unlock() + + if conn == nil || bw == nil { + return + } + + for { + if _, ok := <-fch; !ok { + return + } + nc.mu.Lock() + + // Check to see if we should bail out. 
+ if !nc.isConnected() || nc.isConnecting() || bw != nc.bw || conn != nc.conn { + nc.mu.Unlock() + return + } + if bw.Buffered() > 0 { + // Allow customizing how long we should wait for a flush to be done + // to prevent unhealthy connections blocking the client for too long. + if flusherTimeout > 0 { + conn.SetWriteDeadline(time.Now().Add(flusherTimeout)) + } + + if err := bw.Flush(); err != nil { + if nc.err == nil { + nc.err = err + } + } + conn.SetWriteDeadline(time.Time{}) + } + nc.mu.Unlock() + } +} + +// processPing will send an immediate pong protocol response to the +// server. The server uses this mechanism to detect dead clients. +func (nc *Conn) processPing() { + nc.sendProto(pongProto) +} + +// processPong is used to process responses to the client's ping +// messages. We use pings for the flush mechanism as well. +func (nc *Conn) processPong() { + var ch chan struct{} + + nc.mu.Lock() + if len(nc.pongs) > 0 { + ch = nc.pongs[0] + nc.pongs = nc.pongs[1:] + } + nc.pout = 0 + nc.mu.Unlock() + if ch != nil { + ch <- struct{}{} + } +} + +// processOK is a placeholder for processing OK messages. +func (nc *Conn) processOK() { + // do nothing +} + +// processInfo is used to parse the info messages sent +// from the server. +// This function may update the server pool. +func (nc *Conn) processInfo(info string) error { + if info == _EMPTY_ { + return nil + } + if err := json.Unmarshal([]byte(info), &nc.info); err != nil { + return err + } + urls := nc.info.ConnectURLs + if len(urls) > 0 { + added := false + // If randomization is allowed, shuffle the received array, not the + // entire pool. We want to preserve the pool's order up to this point + // (this would otherwise be problematic for the (re)connect loop). 
+ if !nc.Opts.NoRandomize { + for i := range urls { + j := rand.Intn(i + 1) + urls[i], urls[j] = urls[j], urls[i] + } + } + for _, curl := range urls { + if _, present := nc.urls[curl]; !present { + if err := nc.addURLToPool(fmt.Sprintf("nats://%s", curl), true); err != nil { + continue + } + added = true + } + } + if added && !nc.initc && nc.Opts.DiscoveredServersCB != nil { + nc.ach <- func() { nc.Opts.DiscoveredServersCB(nc) } + } + } + return nil +} + +// processAsyncInfo does the same than processInfo, but is called +// from the parser. Calls processInfo under connection's lock +// protection. +func (nc *Conn) processAsyncInfo(info []byte) { + nc.mu.Lock() + // Ignore errors, we will simply not update the server pool... + nc.processInfo(string(info)) + nc.mu.Unlock() +} + +// LastError reports the last error encountered via the connection. +// It can be used reliably within ClosedCB in order to find out reason +// why connection was closed for example. +func (nc *Conn) LastError() error { + if nc == nil { + return ErrInvalidConnection + } + nc.mu.Lock() + err := nc.err + nc.mu.Unlock() + return err +} + +// processErr processes any error messages from the server and +// sets the connection's lastError. +func (nc *Conn) processErr(e string) { + // Trim, remove quotes, convert to lower case. + e = normalizeErr(e) + + // FIXME(dlc) - process Slow Consumer signals special. + if e == STALE_CONNECTION { + nc.processOpErr(ErrStaleConnection) + } else if strings.HasPrefix(e, PERMISSIONS_ERR) { + nc.processPermissionsViolation(e) + } else if strings.HasPrefix(e, AUTHORIZATION_ERR) { + nc.processAuthorizationViolation(e) + } else { + nc.mu.Lock() + nc.err = errors.New("nats: " + e) + nc.mu.Unlock() + nc.Close() + } +} + +// kickFlusher will send a bool on a channel to kick the +// flush Go routine to flush data to the server. 
+func (nc *Conn) kickFlusher() { + if nc.bw != nil { + select { + case nc.fch <- struct{}{}: + default: + } + } +} + +// Publish publishes the data argument to the given subject. The data +// argument is left untouched and needs to be correctly interpreted on +// the receiver. +func (nc *Conn) Publish(subj string, data []byte) error { + return nc.publish(subj, _EMPTY_, data) +} + +// PublishMsg publishes the Msg structure, which includes the +// Subject, an optional Reply and an optional Data field. +func (nc *Conn) PublishMsg(m *Msg) error { + if m == nil { + return ErrInvalidMsg + } + return nc.publish(m.Subject, m.Reply, m.Data) +} + +// PublishRequest will perform a Publish() excpecting a response on the +// reply subject. Use Request() for automatically waiting for a response +// inline. +func (nc *Conn) PublishRequest(subj, reply string, data []byte) error { + return nc.publish(subj, reply, data) +} + +// Used for handrolled itoa +const digits = "0123456789" + +// publish is the internal function to publish messages to a nats-server. +// Sends a protocol data message by queuing into the bufio writer +// and kicking the flush go routine. These writes should be protected. +func (nc *Conn) publish(subj, reply string, data []byte) error { + if nc == nil { + return ErrInvalidConnection + } + if subj == "" { + return ErrBadSubject + } + nc.mu.Lock() + + // Proactively reject payloads over the threshold set by server. + msgSize := int64(len(data)) + if msgSize > nc.info.MaxPayload { + nc.mu.Unlock() + return ErrMaxPayload + } + + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + + // Check if we are reconnecting, and if so check if + // we have exceeded our reconnect outbound buffer limits. + if nc.isReconnecting() { + // Flush to underlying buffer. 
+ nc.bw.Flush() + // Check if we are over + if nc.pending.Len() >= nc.Opts.ReconnectBufSize { + nc.mu.Unlock() + return ErrReconnectBufExceeded + } + } + + msgh := nc.scratch[:len(_PUB_P_)] + msgh = append(msgh, subj...) + msgh = append(msgh, ' ') + if reply != "" { + msgh = append(msgh, reply...) + msgh = append(msgh, ' ') + } + + // We could be smarter here, but simple loop is ok, + // just avoid strconv in fast path + // FIXME(dlc) - Find a better way here. + // msgh = strconv.AppendInt(msgh, int64(len(data)), 10) + + var b [12]byte + var i = len(b) + if len(data) > 0 { + for l := len(data); l > 0; l /= 10 { + i -= 1 + b[i] = digits[l%10] + } + } else { + i -= 1 + b[i] = digits[0] + } + + msgh = append(msgh, b[i:]...) + msgh = append(msgh, _CRLF_...) + + _, err := nc.bw.Write(msgh) + if err == nil { + _, err = nc.bw.Write(data) + } + if err == nil { + _, err = nc.bw.WriteString(_CRLF_) + } + if err != nil { + nc.mu.Unlock() + return err + } + + nc.OutMsgs++ + nc.OutBytes += uint64(len(data)) + + if len(nc.fch) == 0 { + nc.kickFlusher() + } + nc.mu.Unlock() + return nil +} + +// respHandler is the global response handler. It will look up +// the appropriate channel based on the last token and place +// the message on the channel if possible. +func (nc *Conn) respHandler(m *Msg) { + rt := respToken(m.Subject) + + nc.mu.Lock() + // Just return if closed. + if nc.isClosed() { + nc.mu.Unlock() + return + } + + // Grab mch + mch := nc.respMap[rt] + // Delete the key regardless, one response only. + // FIXME(dlc) - should we track responses past 1 + // just statistics wise? + delete(nc.respMap, rt) + nc.mu.Unlock() + + // Don't block, let Request timeout instead, mch is + // buffered and we should delete the key before a + // second response is processed. + select { + case mch <- m: + default: + return + } +} + +// Create the response subscription we will use for all +// new style responses. This will be on an _INBOX with an +// additional terminal token. 
The subscription will be on +// a wildcard. Caller is responsible for ensuring this is +// only called once. +func (nc *Conn) createRespMux(respSub string) error { + s, err := nc.Subscribe(respSub, nc.respHandler) + if err != nil { + return err + } + nc.mu.Lock() + nc.respMux = s + nc.mu.Unlock() + return nil +} + +// Request will send a request payload and deliver the response message, +// or an error, including a timeout if no message was received properly. +func (nc *Conn) Request(subj string, data []byte, timeout time.Duration) (*Msg, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + + nc.mu.Lock() + // If user wants the old style. + if nc.Opts.UseOldRequestStyle { + nc.mu.Unlock() + return nc.oldRequest(subj, data, timeout) + } + + // Do setup for the new style. + if nc.respMap == nil { + // _INBOX wildcard + nc.respSub = fmt.Sprintf("%s.*", NewInbox()) + nc.respMap = make(map[string]chan *Msg) + } + // Create literal Inbox and map to a chan msg. + mch := make(chan *Msg, RequestChanLen) + respInbox := nc.newRespInbox() + token := respToken(respInbox) + nc.respMap[token] = mch + createSub := nc.respMux == nil + ginbox := nc.respSub + nc.mu.Unlock() + + if createSub { + // Make sure scoped subscription is setup only once. + var err error + nc.respSetup.Do(func() { err = nc.createRespMux(ginbox) }) + if err != nil { + return nil, err + } + } + + if err := nc.PublishRequest(subj, respInbox, data); err != nil { + return nil, err + } + + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + var ok bool + var msg *Msg + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + case <-t.C: + nc.mu.Lock() + delete(nc.respMap, token) + nc.mu.Unlock() + return nil, ErrTimeout + } + + return msg, nil +} + +// oldRequest will create an Inbox and perform a Request() call +// with the Inbox reply and return the first reply received. +// This is optimized for the case of multiple responses. 
+func (nc *Conn) oldRequest(subj string, data []byte, timeout time.Duration) (*Msg, error) { + inbox := NewInbox() + ch := make(chan *Msg, RequestChanLen) + + s, err := nc.subscribe(inbox, _EMPTY_, nil, ch) + if err != nil { + return nil, err + } + s.AutoUnsubscribe(1) + defer s.Unsubscribe() + + err = nc.PublishRequest(subj, inbox, data) + if err != nil { + return nil, err + } + return s.NextMsg(timeout) +} + +// InboxPrefix is the prefix for all inbox subjects. +const InboxPrefix = "_INBOX." +const inboxPrefixLen = len(InboxPrefix) +const respInboxPrefixLen = inboxPrefixLen + nuidSize + 1 + +// NewInbox will return an inbox string which can be used for directed replies from +// subscribers. These are guaranteed to be unique, but can be shared and subscribed +// to by others. +func NewInbox() string { + var b [inboxPrefixLen + nuidSize]byte + pres := b[:inboxPrefixLen] + copy(pres, InboxPrefix) + ns := b[inboxPrefixLen:] + copy(ns, nuid.Next()) + return string(b[:]) +} + +// Creates a new literal response subject that will trigger +// the global subscription handler. +func (nc *Conn) newRespInbox() string { + var b [inboxPrefixLen + (2 * nuidSize) + 1]byte + pres := b[:respInboxPrefixLen] + copy(pres, nc.respSub) + ns := b[respInboxPrefixLen:] + copy(ns, nuid.Next()) + return string(b[:]) +} + +// respToken will return the last token of a literal response inbox +// which we use for the message channel lookup. +func respToken(respInbox string) string { + return respInbox[respInboxPrefixLen:] +} + +// Subscribe will express interest in the given subject. The subject +// can have wildcards (partial:*, full:>). Messages will be delivered +// to the associated MsgHandler. If no MsgHandler is given, the +// subscription is a synchronous subscription and can be polled via +// Subscription.NextMsg(). 
+func (nc *Conn) Subscribe(subj string, cb MsgHandler) (*Subscription, error) { + return nc.subscribe(subj, _EMPTY_, cb, nil) +} + +// ChanSubscribe will place all messages received on the channel. +// You should not close the channel until sub.Unsubscribe() has been called. +func (nc *Conn) ChanSubscribe(subj string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, _EMPTY_, nil, ch) +} + +// ChanQueueSubscribe will place all messages received on the channel. +// You should not close the channel until sub.Unsubscribe() has been called. +func (nc *Conn) ChanQueueSubscribe(subj, group string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, group, nil, ch) +} + +// SubscribeSync is syntactic sugar for Subscribe(subject, nil). +func (nc *Conn) SubscribeSync(subj string) (*Subscription, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + mch := make(chan *Msg, nc.Opts.SubChanLen) + s, e := nc.subscribe(subj, _EMPTY_, nil, mch) + if s != nil { + s.typ = SyncSubscription + } + return s, e +} + +// QueueSubscribe creates an asynchronous queue subscriber on the given subject. +// All subscribers with the same queue name will form the queue group and +// only one member of the group will be selected to receive any given +// message asynchronously. +func (nc *Conn) QueueSubscribe(subj, queue string, cb MsgHandler) (*Subscription, error) { + return nc.subscribe(subj, queue, cb, nil) +} + +// QueueSubscribeSync creates a synchronous queue subscriber on the given +// subject. All subscribers with the same queue name will form the queue +// group and only one member of the group will be selected to receive any +// given message synchronously. 
+func (nc *Conn) QueueSubscribeSync(subj, queue string) (*Subscription, error) { + mch := make(chan *Msg, nc.Opts.SubChanLen) + s, e := nc.subscribe(subj, queue, nil, mch) + if s != nil { + s.typ = SyncSubscription + } + return s, e +} + +// QueueSubscribeSyncWithChan is syntactic sugar for ChanQueueSubscribe(subject, group, ch). +func (nc *Conn) QueueSubscribeSyncWithChan(subj, queue string, ch chan *Msg) (*Subscription, error) { + return nc.subscribe(subj, queue, nil, ch) +} + +// subscribe is the internal subscribe function that indicates interest in a subject. +func (nc *Conn) subscribe(subj, queue string, cb MsgHandler, ch chan *Msg) (*Subscription, error) { + if nc == nil { + return nil, ErrInvalidConnection + } + nc.mu.Lock() + // ok here, but defer is generally expensive + defer nc.mu.Unlock() + defer nc.kickFlusher() + + // Check for some error conditions. + if nc.isClosed() { + return nil, ErrConnectionClosed + } + + if cb == nil && ch == nil { + return nil, ErrBadSubscription + } + + sub := &Subscription{Subject: subj, Queue: queue, mcb: cb, conn: nc} + // Set pending limits. + sub.pMsgsLimit = DefaultSubPendingMsgsLimit + sub.pBytesLimit = DefaultSubPendingBytesLimit + + // If we have an async callback, start up a sub specific + // Go routine to deliver the messages. + if cb != nil { + sub.typ = AsyncSubscription + sub.pCond = sync.NewCond(&sub.mu) + go nc.waitForMsgs(sub) + } else { + sub.typ = ChanSubscription + sub.mch = ch + } + + nc.subsMu.Lock() + nc.ssid++ + sub.sid = nc.ssid + nc.subs[sub.sid] = sub + nc.subsMu.Unlock() + + // We will send these for all subs when we reconnect + // so that we can suppress here. 
+ if !nc.isReconnecting() { + nc.bw.WriteString(fmt.Sprintf(subProto, subj, queue, sub.sid)) + } + return sub, nil +} + +// Lock for nc should be held here upon entry +func (nc *Conn) removeSub(s *Subscription) { + nc.subsMu.Lock() + delete(nc.subs, s.sid) + nc.subsMu.Unlock() + s.mu.Lock() + defer s.mu.Unlock() + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + + // Mark as invalid + s.conn = nil + s.closed = true + if s.pCond != nil { + s.pCond.Broadcast() + } +} + +// SubscriptionType is the type of the Subscription. +type SubscriptionType int + +// The different types of subscription types. +const ( + AsyncSubscription = SubscriptionType(iota) + SyncSubscription + ChanSubscription + NilSubscription +) + +// Type returns the type of Subscription. +func (s *Subscription) Type() SubscriptionType { + if s == nil { + return NilSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + return s.typ +} + +// IsValid returns a boolean indicating whether the subscription +// is still active. This will return false if the subscription has +// already been closed. +func (s *Subscription) IsValid() bool { + if s == nil { + return false + } + s.mu.Lock() + defer s.mu.Unlock() + return s.conn != nil +} + +// Unsubscribe will remove interest in the given subject. +func (s *Subscription) Unsubscribe() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + s.mu.Unlock() + if conn == nil { + return ErrBadSubscription + } + return conn.unsubscribe(s, 0) +} + +// AutoUnsubscribe will issue an automatic Unsubscribe that is +// processed by the server when max messages have been received. +// This can be useful when sending a request to an unknown number +// of subscribers. Request() uses this functionality. 
+func (s *Subscription) AutoUnsubscribe(max int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + conn := s.conn + s.mu.Unlock() + if conn == nil { + return ErrBadSubscription + } + return conn.unsubscribe(s, max) +} + +// unsubscribe performs the low level unsubscribe to the server. +// Use Subscription.Unsubscribe() +func (nc *Conn) unsubscribe(sub *Subscription, max int) error { + nc.mu.Lock() + // ok here, but defer is expensive + defer nc.mu.Unlock() + defer nc.kickFlusher() + + if nc.isClosed() { + return ErrConnectionClosed + } + + nc.subsMu.RLock() + s := nc.subs[sub.sid] + nc.subsMu.RUnlock() + // Already unsubscribed + if s == nil { + return nil + } + + maxStr := _EMPTY_ + if max > 0 { + s.max = uint64(max) + maxStr = strconv.Itoa(max) + } else { + nc.removeSub(s) + } + // We will send these for all subs when we reconnect + // so that we can suppress here. + if !nc.isReconnecting() { + nc.bw.WriteString(fmt.Sprintf(unsubProto, s.sid, maxStr)) + } + return nil +} + +// NextMsg will return the next message available to a synchronous subscriber +// or block until one is available. A timeout can be used to return when no +// message has been delivered. +func (s *Subscription) NextMsg(timeout time.Duration) (*Msg, error) { + if s == nil { + return nil, ErrBadSubscription + } + + s.mu.Lock() + err := s.validateNextMsgState() + if err != nil { + s.mu.Unlock() + return nil, err + } + + // snapshot + mch := s.mch + s.mu.Unlock() + + var ok bool + var msg *Msg + + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + select { + case msg, ok = <-mch: + if !ok { + return nil, ErrConnectionClosed + } + err := s.processNextMsgDelivered(msg) + if err != nil { + return nil, err + } + case <-t.C: + return nil, ErrTimeout + } + + return msg, nil +} + +// validateNextMsgState checks whether the subscription is in a valid +// state to call NextMsg and be delivered another message synchronously. 
+// This should be called while holding the lock. +func (s *Subscription) validateNextMsgState() error { + if s.connClosed { + return ErrConnectionClosed + } + if s.mch == nil { + if s.max > 0 && s.delivered >= s.max { + return ErrMaxMessages + } else if s.closed { + return ErrBadSubscription + } + } + if s.mcb != nil { + return ErrSyncSubRequired + } + if s.sc { + s.sc = false + return ErrSlowConsumer + } + + return nil +} + +// processNextMsgDelivered takes a message and applies the needed +// accounting to the stats from the subscription, returning an +// error in case we have the maximum number of messages have been +// delivered already. It should not be called while holding the lock. +func (s *Subscription) processNextMsgDelivered(msg *Msg) error { + s.mu.Lock() + nc := s.conn + max := s.max + + // Update some stats. + s.delivered++ + delivered := s.delivered + if s.typ == SyncSubscription { + s.pMsgs-- + s.pBytes -= len(msg.Data) + } + s.mu.Unlock() + + if max > 0 { + if delivered > max { + return ErrMaxMessages + } + // Remove subscription if we have reached max. + if delivered == max { + nc.mu.Lock() + nc.removeSub(s) + nc.mu.Unlock() + } + } + + return nil +} + +// Queued returns the number of queued messages in the client for this subscription. +// DEPRECATED: Use Pending() +func (s *Subscription) QueuedMsgs() (int, error) { + m, _, err := s.Pending() + return int(m), err +} + +// Pending returns the number of queued messages and queued bytes in the client for this subscription. +func (s *Subscription) Pending() (int, int, error) { + if s == nil { + return -1, -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, -1, ErrBadSubscription + } + if s.typ == ChanSubscription { + return -1, -1, ErrTypeSubscription + } + return s.pMsgs, s.pBytes, nil +} + +// MaxPending returns the maximum number of queued messages and queued bytes seen so far. 
+func (s *Subscription) MaxPending() (int, int, error) { + if s == nil { + return -1, -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, -1, ErrBadSubscription + } + if s.typ == ChanSubscription { + return -1, -1, ErrTypeSubscription + } + return s.pMsgsMax, s.pBytesMax, nil +} + +// ClearMaxPending resets the maximums seen so far. +func (s *Subscription) ClearMaxPending() error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return ErrBadSubscription + } + if s.typ == ChanSubscription { + return ErrTypeSubscription + } + s.pMsgsMax, s.pBytesMax = 0, 0 + return nil +} + +// Pending Limits +const ( + DefaultSubPendingMsgsLimit = 65536 + DefaultSubPendingBytesLimit = 65536 * 1024 +) + +// PendingLimits returns the current limits for this subscription. +// If no error is returned, a negative value indicates that the +// given metric is not limited. +func (s *Subscription) PendingLimits() (int, int, error) { + if s == nil { + return -1, -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, -1, ErrBadSubscription + } + if s.typ == ChanSubscription { + return -1, -1, ErrTypeSubscription + } + return s.pMsgsLimit, s.pBytesLimit, nil +} + +// SetPendingLimits sets the limits for pending msgs and bytes for this subscription. +// Zero is not allowed. Any negative value means that the given metric is not limited. +func (s *Subscription) SetPendingLimits(msgLimit, bytesLimit int) error { + if s == nil { + return ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return ErrBadSubscription + } + if s.typ == ChanSubscription { + return ErrTypeSubscription + } + if msgLimit == 0 || bytesLimit == 0 { + return ErrInvalidArg + } + s.pMsgsLimit, s.pBytesLimit = msgLimit, bytesLimit + return nil +} + +// Delivered returns the number of delivered messages for this subscription. 
+func (s *Subscription) Delivered() (int64, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, ErrBadSubscription + } + return int64(s.delivered), nil +} + +// Dropped returns the number of known dropped messages for this subscription. +// This will correspond to messages dropped by violations of PendingLimits. If +// the server declares the connection a SlowConsumer, this number may not be +// valid. +func (s *Subscription) Dropped() (int, error) { + if s == nil { + return -1, ErrBadSubscription + } + s.mu.Lock() + defer s.mu.Unlock() + if s.conn == nil { + return -1, ErrBadSubscription + } + return s.dropped, nil +} + +// FIXME: This is a hack +// removeFlushEntry is needed when we need to discard queued up responses +// for our pings as part of a flush call. This happens when we have a flush +// call outstanding and we call close. +func (nc *Conn) removeFlushEntry(ch chan struct{}) bool { + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.pongs == nil { + return false + } + for i, c := range nc.pongs { + if c == ch { + nc.pongs[i] = nil + return true + } + } + return false +} + +// The lock must be held entering this function. +func (nc *Conn) sendPing(ch chan struct{}) { + nc.pongs = append(nc.pongs, ch) + nc.bw.WriteString(pingProto) + // Flush in place. + nc.bw.Flush() +} + +// This will fire periodically and send a client origin +// ping to the server. Will also check that we have received +// responses from the server. +func (nc *Conn) processPingTimer() { + nc.mu.Lock() + + if nc.status != CONNECTED { + nc.mu.Unlock() + return + } + + // Check for violation + nc.pout++ + if nc.pout > nc.Opts.MaxPingsOut { + nc.mu.Unlock() + nc.processOpErr(ErrStaleConnection) + return + } + + nc.sendPing(nil) + nc.ptmr.Reset(nc.Opts.PingInterval) + nc.mu.Unlock() +} + +// FlushTimeout allows a Flush operation to have an associated timeout. 
+func (nc *Conn) FlushTimeout(timeout time.Duration) (err error) { + if nc == nil { + return ErrInvalidConnection + } + if timeout <= 0 { + return ErrBadTimeout + } + + nc.mu.Lock() + if nc.isClosed() { + nc.mu.Unlock() + return ErrConnectionClosed + } + t := globalTimerPool.Get(timeout) + defer globalTimerPool.Put(t) + + ch := make(chan struct{}) + nc.sendPing(ch) + nc.mu.Unlock() + + select { + case _, ok := <-ch: + if !ok { + err = ErrConnectionClosed + } else { + close(ch) + } + case <-t.C: + err = ErrTimeout + } + + if err != nil { + nc.removeFlushEntry(ch) + } + return +} + +// Flush will perform a round trip to the server and return when it +// receives the internal reply. +func (nc *Conn) Flush() error { + return nc.FlushTimeout(60 * time.Second) +} + +// Buffered will return the number of bytes buffered to be sent to the server. +// FIXME(dlc) take into account disconnected state. +func (nc *Conn) Buffered() (int, error) { + nc.mu.Lock() + defer nc.mu.Unlock() + if nc.isClosed() || nc.bw == nil { + return -1, ErrConnectionClosed + } + return nc.bw.Buffered(), nil +} + +// resendSubscriptions will send our subscription state back to the +// server. Used in reconnects +func (nc *Conn) resendSubscriptions() { + // Since we are going to send protocols to the server, we don't want to + // be holding the subsMu lock (which is used in processMsg). So copy + // the subscriptions in a temporary array. + nc.subsMu.RLock() + subs := make([]*Subscription, 0, len(nc.subs)) + for _, s := range nc.subs { + subs = append(subs, s) + } + nc.subsMu.RUnlock() + for _, s := range subs { + adjustedMax := uint64(0) + s.mu.Lock() + if s.max > 0 { + if s.delivered < s.max { + adjustedMax = s.max - s.delivered + } + + // adjustedMax could be 0 here if the number of delivered msgs + // reached the max, if so unsubscribe. 
+ if adjustedMax == 0 { + s.mu.Unlock() + nc.bw.WriteString(fmt.Sprintf(unsubProto, s.sid, _EMPTY_)) + continue + } + } + s.mu.Unlock() + + nc.bw.WriteString(fmt.Sprintf(subProto, s.Subject, s.Queue, s.sid)) + if adjustedMax > 0 { + maxStr := strconv.Itoa(int(adjustedMax)) + nc.bw.WriteString(fmt.Sprintf(unsubProto, s.sid, maxStr)) + } + } +} + +// This will clear any pending flush calls and release pending calls. +// Lock is assumed to be held by the caller. +func (nc *Conn) clearPendingFlushCalls() { + // Clear any queued pongs, e.g. pending flush calls. + for _, ch := range nc.pongs { + if ch != nil { + close(ch) + } + } + nc.pongs = nil +} + +// This will clear any pending Request calls. +// Lock is assumed to be held by the caller. +func (nc *Conn) clearPendingRequestCalls() { + if nc.respMap == nil { + return + } + for key, ch := range nc.respMap { + if ch != nil { + close(ch) + delete(nc.respMap, key) + } + } +} + +// Low level close call that will do correct cleanup and set +// desired status. Also controls whether user defined callbacks +// will be triggered. The lock should not be held entering this +// function. This function will handle the locking manually. +func (nc *Conn) close(status Status, doCBs bool) { + nc.mu.Lock() + if nc.isClosed() { + nc.status = status + nc.mu.Unlock() + return + } + nc.status = CLOSED + + // Kick the Go routines so they fall out. + nc.kickFlusher() + nc.mu.Unlock() + + nc.mu.Lock() + + // Clear any queued pongs, e.g. pending flush calls. + nc.clearPendingFlushCalls() + + // Clear any queued and blocking Requests. + nc.clearPendingRequestCalls() + + if nc.ptmr != nil { + nc.ptmr.Stop() + } + + // Go ahead and make sure we have flushed the outbound + if nc.conn != nil { + nc.bw.Flush() + defer nc.conn.Close() + } + + // Close sync subscriber channels and release any + // pending NextMsg() calls. 
+ nc.subsMu.Lock() + for _, s := range nc.subs { + s.mu.Lock() + + // Release callers on NextMsg for SyncSubscription only + if s.mch != nil && s.typ == SyncSubscription { + close(s.mch) + } + s.mch = nil + // Mark as invalid, for signaling to deliverMsgs + s.closed = true + // Mark connection closed in subscription + s.connClosed = true + // If we have an async subscription, signals it to exit + if s.typ == AsyncSubscription && s.pCond != nil { + s.pCond.Signal() + } + + s.mu.Unlock() + } + nc.subs = nil + nc.subsMu.Unlock() + + // Perform appropriate callback if needed for a disconnect. + if doCBs { + if nc.Opts.DisconnectedCB != nil && nc.conn != nil { + nc.ach <- func() { nc.Opts.DisconnectedCB(nc) } + } + if nc.Opts.ClosedCB != nil { + nc.ach <- func() { nc.Opts.ClosedCB(nc) } + } + nc.ach <- nc.closeAsyncFunc() + } + nc.status = status + nc.mu.Unlock() +} + +// Close will close the connection to the server. This call will release +// all blocking calls, such as Flush() and NextMsg() +func (nc *Conn) Close() { + nc.close(CLOSED, true) +} + +// IsClosed tests if a Conn has been closed. +func (nc *Conn) IsClosed() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.isClosed() +} + +// IsReconnecting tests if a Conn is reconnecting. +func (nc *Conn) IsReconnecting() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.isReconnecting() +} + +// IsConnected tests if a Conn is connected. 
+func (nc *Conn) IsConnected() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.isConnected() +} + +// caller must lock +func (nc *Conn) getServers(implicitOnly bool) []string { + poolSize := len(nc.srvPool) + var servers = make([]string, 0) + for i := 0; i < poolSize; i++ { + if implicitOnly && !nc.srvPool[i].isImplicit { + continue + } + url := nc.srvPool[i].url + servers = append(servers, fmt.Sprintf("%s://%s", url.Scheme, url.Host)) + } + return servers +} + +// Servers returns the list of known server urls, including additional +// servers discovered after a connection has been established. If +// authentication is enabled, use UserInfo or Token when connecting with +// these urls. +func (nc *Conn) Servers() []string { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.getServers(false) +} + +// DiscoveredServers returns only the server urls that have been discovered +// after a connection has been established. If authentication is enabled, +// use UserInfo or Token when connecting with these urls. +func (nc *Conn) DiscoveredServers() []string { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.getServers(true) +} + +// Status returns the current state of the connection. +func (nc *Conn) Status() Status { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.status +} + +// Test if Conn has been closed Lock is assumed held. +func (nc *Conn) isClosed() bool { + return nc.status == CLOSED +} + +// Test if Conn is in the process of connecting +func (nc *Conn) isConnecting() bool { + return nc.status == CONNECTING +} + +// Test if Conn is being reconnected. +func (nc *Conn) isReconnecting() bool { + return nc.status == RECONNECTING +} + +// Test if Conn is connected or connecting. +func (nc *Conn) isConnected() bool { + return nc.status == CONNECTED +} + +// Stats will return a race safe copy of the Statistics section for the connection. +func (nc *Conn) Stats() Statistics { + // Stats are updated either under connection's mu or subsMu mutexes. 
+ // Lock both to safely get them. + nc.mu.Lock() + nc.subsMu.RLock() + stats := Statistics{ + InMsgs: nc.InMsgs, + InBytes: nc.InBytes, + OutMsgs: nc.OutMsgs, + OutBytes: nc.OutBytes, + Reconnects: nc.Reconnects, + } + nc.subsMu.RUnlock() + nc.mu.Unlock() + return stats +} + +// MaxPayload returns the size limit that a message payload can have. +// This is set by the server configuration and delivered to the client +// upon connect. +func (nc *Conn) MaxPayload() int64 { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.info.MaxPayload +} + +// AuthRequired will return if the connected server requires authorization. +func (nc *Conn) AuthRequired() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.info.AuthRequired +} + +// TLSRequired will return if the connected server requires TLS connections. +func (nc *Conn) TLSRequired() bool { + nc.mu.Lock() + defer nc.mu.Unlock() + return nc.info.TLSRequired +} diff --git a/vendor/github.com/nats-io/go-nats/netchan.go b/vendor/github.com/nats-io/go-nats/netchan.go new file mode 100644 index 0000000..0608fd7 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/netchan.go @@ -0,0 +1,100 @@ +// Copyright 2013-2017 Apcera Inc. All rights reserved. + +package nats + +import ( + "errors" + "reflect" +) + +// This allows the functionality for network channels by binding send and receive Go chans +// to subjects and optionally queue groups. +// Data will be encoded and decoded via the EncodedConn and its associated encoders. + +// BindSendChan binds a channel for send operations to NATS. +func (c *EncodedConn) BindSendChan(subject string, channel interface{}) error { + chVal := reflect.ValueOf(channel) + if chVal.Kind() != reflect.Chan { + return ErrChanArg + } + go chPublish(c, chVal, subject) + return nil +} + +// Publish all values that arrive on the channel until it is closed or we +// encounter an error. 
+func chPublish(c *EncodedConn, chVal reflect.Value, subject string) { + for { + val, ok := chVal.Recv() + if !ok { + // Channel has most likely been closed. + return + } + if e := c.Publish(subject, val.Interface()); e != nil { + // Do this under lock. + c.Conn.mu.Lock() + defer c.Conn.mu.Unlock() + + if c.Conn.Opts.AsyncErrorCB != nil { + // FIXME(dlc) - Not sure this is the right thing to do. + // FIXME(ivan) - If the connection is not yet closed, try to schedule the callback + if c.Conn.isClosed() { + go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) + } else { + c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) } + } + } + return + } + } +} + +// BindRecvChan binds a channel for receive operations from NATS. +func (c *EncodedConn) BindRecvChan(subject string, channel interface{}) (*Subscription, error) { + return c.bindRecvChan(subject, _EMPTY_, channel) +} + +// BindRecvQueueChan binds a channel for queue-based receive operations from NATS. +func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel interface{}) (*Subscription, error) { + return c.bindRecvChan(subject, queue, channel) +} + +// Internal function to bind receive operations for a channel. 
+func (c *EncodedConn) bindRecvChan(subject, queue string, channel interface{}) (*Subscription, error) { + chVal := reflect.ValueOf(channel) + if chVal.Kind() != reflect.Chan { + return nil, ErrChanArg + } + argType := chVal.Type().Elem() + + cb := func(m *Msg) { + var oPtr reflect.Value + if argType.Kind() != reflect.Ptr { + oPtr = reflect.New(argType) + } else { + oPtr = reflect.New(argType.Elem()) + } + if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil { + c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error()) + if c.Conn.Opts.AsyncErrorCB != nil { + c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) } + } + return + } + if argType.Kind() != reflect.Ptr { + oPtr = reflect.Indirect(oPtr) + } + // This is a bit hacky, but in this instance we may be trying to send to a closed channel. + // and the user does not know when it is safe to close the channel. + defer func() { + // If we have panicked, recover and close the subscription. + if r := recover(); r != nil { + m.Sub.Unsubscribe() + } + }() + // Actually do the send to the channel. + chVal.Send(oPtr) + } + + return c.Conn.subscribe(subject, queue, cb, nil) +} diff --git a/vendor/github.com/nats-io/go-nats/parser.go b/vendor/github.com/nats-io/go-nats/parser.go new file mode 100644 index 0000000..8359b8b --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/parser.go @@ -0,0 +1,470 @@ +// Copyright 2012-2017 Apcera Inc. All rights reserved. 
+ +package nats + +import ( + "fmt" +) + +type msgArg struct { + subject []byte + reply []byte + sid int64 + size int +} + +const MAX_CONTROL_LINE_SIZE = 1024 + +type parseState struct { + state int + as int + drop int + ma msgArg + argBuf []byte + msgBuf []byte + scratch [MAX_CONTROL_LINE_SIZE]byte +} + +const ( + OP_START = iota + OP_PLUS + OP_PLUS_O + OP_PLUS_OK + OP_MINUS + OP_MINUS_E + OP_MINUS_ER + OP_MINUS_ERR + OP_MINUS_ERR_SPC + MINUS_ERR_ARG + OP_M + OP_MS + OP_MSG + OP_MSG_SPC + MSG_ARG + MSG_PAYLOAD + MSG_END + OP_P + OP_PI + OP_PIN + OP_PING + OP_PO + OP_PON + OP_PONG + OP_I + OP_IN + OP_INF + OP_INFO + OP_INFO_SPC + INFO_ARG +) + +// parse is the fast protocol parser engine. +func (nc *Conn) parse(buf []byte) error { + var i int + var b byte + + // Move to loop instead of range syntax to allow jumping of i + for i = 0; i < len(buf); i++ { + b = buf[i] + + switch nc.ps.state { + case OP_START: + switch b { + case 'M', 'm': + nc.ps.state = OP_M + case 'P', 'p': + nc.ps.state = OP_P + case '+': + nc.ps.state = OP_PLUS + case '-': + nc.ps.state = OP_MINUS + case 'I', 'i': + nc.ps.state = OP_I + default: + goto parseErr + } + case OP_M: + switch b { + case 'S', 's': + nc.ps.state = OP_MS + default: + goto parseErr + } + case OP_MS: + switch b { + case 'G', 'g': + nc.ps.state = OP_MSG + default: + goto parseErr + } + case OP_MSG: + switch b { + case ' ', '\t': + nc.ps.state = OP_MSG_SPC + default: + goto parseErr + } + case OP_MSG_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MSG_ARG + nc.ps.as = i + } + case MSG_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + if err := nc.processMsgArgs(arg); err != nil { + return err + } + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD + + // jump ahead with the index. If this overruns + // what is left we fall out and process split + // buffer. 
+ i = nc.ps.as + nc.ps.ma.size - 1 + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case MSG_PAYLOAD: + if nc.ps.msgBuf != nil { + if len(nc.ps.msgBuf) >= nc.ps.ma.size { + nc.processMsg(nc.ps.msgBuf) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END + } else { + // copy as much as we can to the buffer and skip ahead. + toCopy := nc.ps.ma.size - len(nc.ps.msgBuf) + avail := len(buf) - i + + if avail < toCopy { + toCopy = avail + } + + if toCopy > 0 { + start := len(nc.ps.msgBuf) + // This is needed for copy to work. + nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy] + copy(nc.ps.msgBuf[start:], buf[i:i+toCopy]) + // Update our index + i = (i + toCopy) - 1 + } else { + nc.ps.msgBuf = append(nc.ps.msgBuf, b) + } + } + } else if i-nc.ps.as >= nc.ps.ma.size { + nc.processMsg(buf[nc.ps.as:i]) + nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END + } + case MSG_END: + switch b { + case '\n': + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + continue + } + case OP_PLUS: + switch b { + case 'O', 'o': + nc.ps.state = OP_PLUS_O + default: + goto parseErr + } + case OP_PLUS_O: + switch b { + case 'K', 'k': + nc.ps.state = OP_PLUS_OK + default: + goto parseErr + } + case OP_PLUS_OK: + switch b { + case '\n': + nc.processOK() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_MINUS: + switch b { + case 'E', 'e': + nc.ps.state = OP_MINUS_E + default: + goto parseErr + } + case OP_MINUS_E: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ER + default: + goto parseErr + } + case OP_MINUS_ER: + switch b { + case 'R', 'r': + nc.ps.state = OP_MINUS_ERR + default: + goto parseErr + } + case OP_MINUS_ERR: + switch b { + case ' ', '\t': + nc.ps.state = OP_MINUS_ERR_SPC + default: + goto parseErr + } + case OP_MINUS_ERR_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = MINUS_ERR_ARG + nc.ps.as = i + } + case MINUS_ERR_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var 
arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processErr(string(arg)) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + case OP_P: + switch b { + case 'I', 'i': + nc.ps.state = OP_PI + case 'O', 'o': + nc.ps.state = OP_PO + default: + goto parseErr + } + case OP_PO: + switch b { + case 'N', 'n': + nc.ps.state = OP_PON + default: + goto parseErr + } + case OP_PON: + switch b { + case 'G', 'g': + nc.ps.state = OP_PONG + default: + goto parseErr + } + case OP_PONG: + switch b { + case '\n': + nc.processPong() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_PI: + switch b { + case 'N', 'n': + nc.ps.state = OP_PIN + default: + goto parseErr + } + case OP_PIN: + switch b { + case 'G', 'g': + nc.ps.state = OP_PING + default: + goto parseErr + } + case OP_PING: + switch b { + case '\n': + nc.processPing() + nc.ps.drop, nc.ps.state = 0, OP_START + } + case OP_I: + switch b { + case 'N', 'n': + nc.ps.state = OP_IN + default: + goto parseErr + } + case OP_IN: + switch b { + case 'F', 'f': + nc.ps.state = OP_INF + default: + goto parseErr + } + case OP_INF: + switch b { + case 'O', 'o': + nc.ps.state = OP_INFO + default: + goto parseErr + } + case OP_INFO: + switch b { + case ' ', '\t': + nc.ps.state = OP_INFO_SPC + default: + goto parseErr + } + case OP_INFO_SPC: + switch b { + case ' ', '\t': + continue + default: + nc.ps.state = INFO_ARG + nc.ps.as = i + } + case INFO_ARG: + switch b { + case '\r': + nc.ps.drop = 1 + case '\n': + var arg []byte + if nc.ps.argBuf != nil { + arg = nc.ps.argBuf + nc.ps.argBuf = nil + } else { + arg = buf[nc.ps.as : i-nc.ps.drop] + } + nc.processAsyncInfo(arg) + nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START + default: + if nc.ps.argBuf != nil { + nc.ps.argBuf = append(nc.ps.argBuf, b) + } + } + default: + goto parseErr + } + } + // Check for split 
buffer scenarios + if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && nc.ps.argBuf == nil { + nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...) + // FIXME, check max len + } + // Check for split msg + if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil { + // We need to clone the msgArg if it is still referencing the + // read buffer and we are not able to process the msg. + if nc.ps.argBuf == nil { + nc.cloneMsgArg() + } + + // If we will overflow the scratch buffer, just create a + // new buffer to hold the split message. + if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) { + lrem := len(buf[nc.ps.as:]) + + nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size) + copy(nc.ps.msgBuf, buf[nc.ps.as:]) + } else { + nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)] + nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...) + } + } + + return nil + +parseErr: + return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:]) +} + +// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but +// we need to hold onto it into the next read. +func (nc *Conn) cloneMsgArg() { + nc.ps.argBuf = nc.ps.scratch[:0] + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...) + nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...) 
+ nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)] + if nc.ps.ma.reply != nil { + nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):] + } +} + +const argsLenMax = 4 + +func (nc *Conn) processMsgArgs(arg []byte) error { + // Unroll splitArgs to avoid runtime/heap issues + a := [argsLenMax][]byte{} + args := a[:0] + start := -1 + for i, b := range arg { + switch b { + case ' ', '\t', '\r', '\n': + if start >= 0 { + args = append(args, arg[start:i]) + start = -1 + } + default: + if start < 0 { + start = i + } + } + } + if start >= 0 { + args = append(args, arg[start:]) + } + + switch len(args) { + case 3: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = nil + nc.ps.ma.size = int(parseInt64(args[2])) + case 4: + nc.ps.ma.subject = args[0] + nc.ps.ma.sid = parseInt64(args[1]) + nc.ps.ma.reply = args[2] + nc.ps.ma.size = int(parseInt64(args[3])) + default: + return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg) + } + if nc.ps.ma.sid < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg) + } + if nc.ps.ma.size < 0 { + return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg) + } + return nil +} + +// Ascii numbers 0-9 +const ( + ascii_0 = 48 + ascii_9 = 57 +) + +// parseInt64 expects decimal positive numbers. We +// return -1 to signal error +func parseInt64(d []byte) (n int64) { + if len(d) == 0 { + return -1 + } + for _, dec := range d { + if dec < ascii_0 || dec > ascii_9 { + return -1 + } + n = n*10 + (int64(dec) - ascii_0) + } + return n +} diff --git a/vendor/github.com/nats-io/go-nats/timer.go b/vendor/github.com/nats-io/go-nats/timer.go new file mode 100644 index 0000000..1b96fd5 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/timer.go @@ -0,0 +1,43 @@ +package nats + +import ( + "sync" + "time" +) + +// global pool of *time.Timer's. can be used by multiple goroutines concurrently. 
+var globalTimerPool timerPool + +// timerPool provides GC-able pooling of *time.Timer's. +// can be used by multiple goroutines concurrently. +type timerPool struct { + p sync.Pool +} + +// Get returns a timer that completes after the given duration. +func (tp *timerPool) Get(d time.Duration) *time.Timer { + if t, _ := tp.p.Get().(*time.Timer); t != nil { + t.Reset(d) + return t + } + + return time.NewTimer(d) +} + +// Put pools the given timer. +// +// There is no need to call t.Stop() before calling Put. +// +// Put will try to stop the timer before pooling. If the +// given timer already expired, Put will read the unreceived +// value if there is one. +func (tp *timerPool) Put(t *time.Timer) { + if !t.Stop() { + select { + case <-t.C: + default: + } + } + + tp.p.Put(t) +} diff --git a/vendor/github.com/nats-io/go-nats/util/tls.go b/vendor/github.com/nats-io/go-nats/util/tls.go new file mode 100644 index 0000000..51da0b8 --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/util/tls.go @@ -0,0 +1,37 @@ +// Copyright 2016 Apcera Inc. All rights reserved. +// +build go1.7 + +package util + +import ( + "crypto/tls" +) + +// CloneTLSConfig returns a copy of c. Only the exported fields are copied. +// This is temporary, until this is provided by the language. 
+// https://go-review.googlesource.com/#/c/28075/ +func CloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, + Renegotiation: c.Renegotiation, + } +} diff --git a/vendor/github.com/nats-io/go-nats/util/tls_pre17.go b/vendor/github.com/nats-io/go-nats/util/tls_pre17.go new file mode 100644 index 0000000..db198ae --- /dev/null +++ b/vendor/github.com/nats-io/go-nats/util/tls_pre17.go @@ -0,0 +1,35 @@ +// Copyright 2016 Apcera Inc. All rights reserved. +// +build go1.5,!go1.7 + +package util + +import ( + "crypto/tls" +) + +// CloneTLSConfig returns a copy of c. Only the exported fields are copied. +// This is temporary, until this is provided by the language. 
+// https://go-review.googlesource.com/#/c/28075/ +func CloneTLSConfig(c *tls.Config) *tls.Config { + return &tls.Config{ + Rand: c.Rand, + Time: c.Time, + Certificates: c.Certificates, + NameToCertificate: c.NameToCertificate, + GetCertificate: c.GetCertificate, + RootCAs: c.RootCAs, + NextProtos: c.NextProtos, + ServerName: c.ServerName, + ClientAuth: c.ClientAuth, + ClientCAs: c.ClientCAs, + InsecureSkipVerify: c.InsecureSkipVerify, + CipherSuites: c.CipherSuites, + PreferServerCipherSuites: c.PreferServerCipherSuites, + SessionTicketsDisabled: c.SessionTicketsDisabled, + SessionTicketKey: c.SessionTicketKey, + ClientSessionCache: c.ClientSessionCache, + MinVersion: c.MinVersion, + MaxVersion: c.MaxVersion, + CurvePreferences: c.CurvePreferences, + } +} diff --git a/vendor/github.com/nats-io/nuid/nuid.go b/vendor/github.com/nats-io/nuid/nuid.go new file mode 100644 index 0000000..1fda377 --- /dev/null +++ b/vendor/github.com/nats-io/nuid/nuid.go @@ -0,0 +1,124 @@ +// Copyright 2016 Apcera Inc. All rights reserved. + +// A unique identifier generator that is high performance, very fast, and tries to be entropy pool friendly. +package nuid + +import ( + "crypto/rand" + "fmt" + "math" + "math/big" + "sync" + "time" + + prand "math/rand" +) + +// NUID needs to be very fast to generate and truly unique, all while being entropy pool friendly. +// We will use 12 bytes of crypto generated data (entropy draining), and 10 bytes of sequential data +// that is started at a pseudo random number and increments with a pseudo-random increment. 
+// Total is 22 bytes of base 62 ascii text :) + +// Version of the library +const Version = "1.0.0" + +const ( + digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + base = 62 + preLen = 12 + seqLen = 10 + maxSeq = int64(839299365868340224) // base^seqLen == 62^10 + minInc = int64(33) + maxInc = int64(333) + totalLen = preLen + seqLen +) + +type NUID struct { + pre []byte + seq int64 + inc int64 +} + +type lockedNUID struct { + sync.Mutex + *NUID +} + +// Global NUID +var globalNUID *lockedNUID + +// Seed sequential random with crypto or math/random and current time +// and generate crypto prefix. +func init() { + r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64)) + if err != nil { + prand.Seed(time.Now().UnixNano()) + } else { + prand.Seed(r.Int64()) + } + globalNUID = &lockedNUID{NUID: New()} + globalNUID.RandomizePrefix() +} + +// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment. +func New() *NUID { + n := &NUID{ + seq: prand.Int63n(maxSeq), + inc: minInc + prand.Int63n(maxInc-minInc), + pre: make([]byte, preLen), + } + n.RandomizePrefix() + return n +} + +// Generate the next NUID string from the global locked NUID instance. +func Next() string { + globalNUID.Lock() + nuid := globalNUID.Next() + globalNUID.Unlock() + return nuid +} + +// Generate the next NUID string. +func (n *NUID) Next() string { + // Increment and capture. + n.seq += n.inc + if n.seq >= maxSeq { + n.RandomizePrefix() + n.resetSequential() + } + seq := n.seq + + // Copy prefix + var b [totalLen]byte + bs := b[:preLen] + copy(bs, n.pre) + + // copy in the seq in base36. + for i, l := len(b), seq; i > preLen; l /= base { + i -= 1 + b[i] = digits[l%base] + } + return string(b[:]) +} + +// Resets the sequential portion of the NUID. +func (n *NUID) resetSequential() { + n.seq = prand.Int63n(maxSeq) + n.inc = minInc + prand.Int63n(maxInc-minInc) +} + +// Generate a new prefix from crypto/rand. 
+// This call *can* drain entropy and will be called automatically when we exhaust the sequential range. +// Will panic if it gets an error from rand.Int() +func (n *NUID) RandomizePrefix() { + var cb [preLen]byte + cbs := cb[:] + if nb, err := rand.Read(cbs); nb != preLen || err != nil { + panic(fmt.Sprintf("nuid: failed generating crypto random number: %v\n", err)) + } + + for i := 0; i < preLen; i++ { + n.pre[i] = digits[int(cbs[i])%base] + } +} diff --git a/vendor/github.com/robfig/cron/constantdelay.go b/vendor/github.com/robfig/cron/constantdelay.go new file mode 100644 index 0000000..cd6e7b1 --- /dev/null +++ b/vendor/github.com/robfig/cron/constantdelay.go @@ -0,0 +1,27 @@ +package cron + +import "time" + +// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes". +// It does not support jobs more frequent than once a second. +type ConstantDelaySchedule struct { + Delay time.Duration +} + +// Every returns a crontab Schedule that activates once every duration. +// Delays of less than a second are not supported (will round up to 1 second). +// Any fields less than a Second are truncated. +func Every(duration time.Duration) ConstantDelaySchedule { + if duration < time.Second { + duration = time.Second + } + return ConstantDelaySchedule{ + Delay: duration - time.Duration(duration.Nanoseconds())%time.Second, + } +} + +// Next returns the next time this should be run. +// This rounds so that the next activation time will be on the second. 
+func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time { + return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond) +} diff --git a/vendor/github.com/robfig/cron/cron.go b/vendor/github.com/robfig/cron/cron.go new file mode 100644 index 0000000..2318aeb --- /dev/null +++ b/vendor/github.com/robfig/cron/cron.go @@ -0,0 +1,259 @@ +package cron + +import ( + "log" + "runtime" + "sort" + "time" +) + +// Cron keeps track of any number of entries, invoking the associated func as +// specified by the schedule. It may be started, stopped, and the entries may +// be inspected while running. +type Cron struct { + entries []*Entry + stop chan struct{} + add chan *Entry + snapshot chan []*Entry + running bool + ErrorLog *log.Logger + location *time.Location +} + +// Job is an interface for submitted cron jobs. +type Job interface { + Run() +} + +// The Schedule describes a job's duty cycle. +type Schedule interface { + // Return the next activation time, later than the given time. + // Next is invoked initially, and then each time the job is run. + Next(time.Time) time.Time +} + +// Entry consists of a schedule and the func to execute on that schedule. +type Entry struct { + // The schedule on which this job should be run. + Schedule Schedule + + // The next time the job will run. This is the zero time if Cron has not been + // started or this entry's schedule is unsatisfiable + Next time.Time + + // The last time this job was run. This is the zero time if the job has never + // been run. + Prev time.Time + + // The Job to run. + Job Job +} + +// byTime is a wrapper for sorting the entry array by time +// (with zero time at the end). +type byTime []*Entry + +func (s byTime) Len() int { return len(s) } +func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s byTime) Less(i, j int) bool { + // Two zero times should return false. + // Otherwise, zero is "greater" than any other time. + // (To sort it at the end of the list.) 
+ if s[i].Next.IsZero() { + return false + } + if s[j].Next.IsZero() { + return true + } + return s[i].Next.Before(s[j].Next) +} + +// New returns a new Cron job runner, in the Local time zone. +func New() *Cron { + return NewWithLocation(time.Now().Location()) +} + +// NewWithLocation returns a new Cron job runner. +func NewWithLocation(location *time.Location) *Cron { + return &Cron{ + entries: nil, + add: make(chan *Entry), + stop: make(chan struct{}), + snapshot: make(chan []*Entry), + running: false, + ErrorLog: nil, + location: location, + } +} + +// A wrapper that turns a func() into a cron.Job +type FuncJob func() + +func (f FuncJob) Run() { f() } + +// AddFunc adds a func to the Cron to be run on the given schedule. +func (c *Cron) AddFunc(spec string, cmd func()) error { + return c.AddJob(spec, FuncJob(cmd)) +} + +// AddJob adds a Job to the Cron to be run on the given schedule. +func (c *Cron) AddJob(spec string, cmd Job) error { + schedule, err := Parse(spec) + if err != nil { + return err + } + c.Schedule(schedule, cmd) + return nil +} + +// Schedule adds a Job to the Cron to be run on the given schedule. +func (c *Cron) Schedule(schedule Schedule, cmd Job) { + entry := &Entry{ + Schedule: schedule, + Job: cmd, + } + if !c.running { + c.entries = append(c.entries, entry) + return + } + + c.add <- entry +} + +// Entries returns a snapshot of the cron entries. +func (c *Cron) Entries() []*Entry { + if c.running { + c.snapshot <- nil + x := <-c.snapshot + return x + } + return c.entrySnapshot() +} + +// Location gets the time zone location +func (c *Cron) Location() *time.Location { + return c.location +} + +// Start the cron scheduler in its own go-routine, or no-op if already started. +func (c *Cron) Start() { + if c.running { + return + } + c.running = true + go c.run() +} + +// Run the cron scheduler, or no-op if already running. 
+func (c *Cron) Run() { + if c.running { + return + } + c.running = true + c.run() +} + +func (c *Cron) runWithRecovery(j Job) { + defer func() { + if r := recover(); r != nil { + const size = 64 << 10 + buf := make([]byte, size) + buf = buf[:runtime.Stack(buf, false)] + c.logf("cron: panic running job: %v\n%s", r, buf) + } + }() + j.Run() +} + +// Run the scheduler. this is private just due to the need to synchronize +// access to the 'running' state variable. +func (c *Cron) run() { + // Figure out the next activation times for each entry. + now := c.now() + for _, entry := range c.entries { + entry.Next = entry.Schedule.Next(now) + } + + for { + // Determine the next entry to run. + sort.Sort(byTime(c.entries)) + + var timer *time.Timer + if len(c.entries) == 0 || c.entries[0].Next.IsZero() { + // If there are no entries yet, just sleep - it still handles new entries + // and stop requests. + timer = time.NewTimer(100000 * time.Hour) + } else { + timer = time.NewTimer(c.entries[0].Next.Sub(now)) + } + + for { + select { + case now = <-timer.C: + now = now.In(c.location) + // Run every entry whose next time was less than now + for _, e := range c.entries { + if e.Next.After(now) || e.Next.IsZero() { + break + } + go c.runWithRecovery(e.Job) + e.Prev = e.Next + e.Next = e.Schedule.Next(now) + } + + case newEntry := <-c.add: + timer.Stop() + now = c.now() + newEntry.Next = newEntry.Schedule.Next(now) + c.entries = append(c.entries, newEntry) + + case <-c.snapshot: + c.snapshot <- c.entrySnapshot() + continue + + case <-c.stop: + timer.Stop() + return + } + + break + } + } +} + +// Logs an error to stderr or to the configured error log +func (c *Cron) logf(format string, args ...interface{}) { + if c.ErrorLog != nil { + c.ErrorLog.Printf(format, args...) + } else { + log.Printf(format, args...) + } +} + +// Stop stops the cron scheduler if it is running; otherwise it does nothing. 
+func (c *Cron) Stop() { + if !c.running { + return + } + c.stop <- struct{}{} + c.running = false +} + +// entrySnapshot returns a copy of the current cron entry list. +func (c *Cron) entrySnapshot() []*Entry { + entries := []*Entry{} + for _, e := range c.entries { + entries = append(entries, &Entry{ + Schedule: e.Schedule, + Next: e.Next, + Prev: e.Prev, + Job: e.Job, + }) + } + return entries +} + +// now returns current time in c location +func (c *Cron) now() time.Time { + return time.Now().In(c.location) +} diff --git a/vendor/github.com/robfig/cron/doc.go b/vendor/github.com/robfig/cron/doc.go new file mode 100644 index 0000000..3700cf6 --- /dev/null +++ b/vendor/github.com/robfig/cron/doc.go @@ -0,0 +1,129 @@ +/* +Package cron implements a cron spec parser and job runner. + +Usage + +Callers may register Funcs to be invoked on a given schedule. Cron will run +them in their own goroutines. + + c := cron.New() + c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") }) + c.AddFunc("@hourly", func() { fmt.Println("Every hour") }) + c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") }) + c.Start() + .. + // Funcs are invoked in their own goroutine, asynchronously. + ... + // Funcs may also be added to a running Cron + c.AddFunc("@daily", func() { fmt.Println("Every day") }) + .. + // Inspect the cron job entries' next and previous run times. + inspect(c.Entries()) + .. + c.Stop() // Stop the scheduler (does not stop any jobs already running). + +CRON Expression Format + +A cron expression represents a set of times, using 6 space-separated fields. + + Field name | Mandatory? | Allowed values | Allowed special characters + ---------- | ---------- | -------------- | -------------------------- + Seconds | Yes | 0-59 | * / , - + Minutes | Yes | 0-59 | * / , - + Hours | Yes | 0-23 | * / , - + Day of month | Yes | 1-31 | * / , - ? + Month | Yes | 1-12 or JAN-DEC | * / , - + Day of week | Yes | 0-6 or SUN-SAT | * / , - ? 
+ +Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun", +and "sun" are equally accepted. + +Special Characters + +Asterisk ( * ) + +The asterisk indicates that the cron expression will match for all values of the +field; e.g., using an asterisk in the 5th field (month) would indicate every +month. + +Slash ( / ) + +Slashes are used to describe increments of ranges. For example 3-59/15 in the +1st field (minutes) would indicate the 3rd minute of the hour and every 15 +minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...", +that is, an increment over the largest possible range of the field. The form +"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the +increment until the end of that specific range. It does not wrap around. + +Comma ( , ) + +Commas are used to separate items of a list. For example, using "MON,WED,FRI" in +the 5th field (day of week) would mean Mondays, Wednesdays and Fridays. + +Hyphen ( - ) + +Hyphens are used to define ranges. For example, 9-17 would indicate every +hour between 9am and 5pm inclusive. + +Question mark ( ? ) + +Question mark may be used instead of '*' for leaving either day-of-month or +day-of-week blank. + +Predefined schedules + +You may use one of several pre-defined schedules in place of a cron expression. + + Entry | Description | Equivalent To + ----- | ----------- | ------------- + @yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 0 1 1 * + @monthly | Run once a month, midnight, first of month | 0 0 0 1 * * + @weekly | Run once a week, midnight on Sunday | 0 0 0 * * 0 + @daily (or @midnight) | Run once a day, midnight | 0 0 0 * * * + @hourly | Run once an hour, beginning of hour | 0 0 * * * * + +Intervals + +You may also schedule a job to execute at fixed intervals, starting at the time it's added +or cron is run. 
This is supported by formatting the cron spec like this:

	@every <duration>

where "duration" is a string accepted by time.ParseDuration
(http://golang.org/pkg/time/#ParseDuration).

For example, "@every 1h30m10s" would indicate a schedule that activates immediately,
and then every 1 hour, 30 minutes, 10 seconds.

Note: The interval does not take the job runtime into account. For example,
if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
it will have only 2 minutes of idle time between each run.

Time zones

All interpretation and scheduling is done in the machine's local time zone (as
provided by the Go time package (http://www.golang.org/pkg/time).

Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
not be run!

Thread safety

Since the Cron service runs concurrently with the calling code, some amount of
care must be taken to ensure proper synchronization.

All cron methods are designed to be correctly synchronized as long as the caller
ensures that invocations have a clear happens-before ordering between them.

Implementation

Cron entries are stored in an array, sorted by their next activation time. Cron
sleeps until the next job is due to be run.

Upon waking:
 - it runs each entry that is active on that second
 - it calculates the next run times for the jobs that were run
 - it re-sorts the array of entries by next activation time.
 - it goes to sleep until the soonest job.
*/
package cron
diff --git a/vendor/github.com/robfig/cron/parser.go b/vendor/github.com/robfig/cron/parser.go
new file mode 100644
index 0000000..a5e83c0
--- /dev/null
+++ b/vendor/github.com/robfig/cron/parser.go
@@ -0,0 +1,380 @@
package cron

import (
	"fmt"
	"math"
	"strconv"
	"strings"
	"time"
)

// Configuration options for creating a parser. Most options specify which
// fields should be included, while others enable features.
// If a field is not
// included the parser will assume a default value. These options do not change
// the order fields are parsed in.
type ParseOption int

const (
	Second      ParseOption = 1 << iota // Seconds field, default 0
	Minute                              // Minutes field, default 0
	Hour                                // Hours field, default 0
	Dom                                 // Day of month field, default *
	Month                               // Month field, default *
	Dow                                 // Day of week field, default *
	DowOptional                         // Optional day of week field, default *
	Descriptor                          // Allow descriptors such as @monthly, @weekly, etc.
)

// places lists the fields in the order they appear in a spec string.
var places = []ParseOption{
	Second,
	Minute,
	Hour,
	Dom,
	Month,
	Dow,
}

// defaults holds the value substituted for each field that the parser's
// options do not include.
var defaults = []string{
	"0",
	"0",
	"0",
	"*",
	"*",
	"*",
}

// A custom Parser that can be configured.
type Parser struct {
	options   ParseOption
	optionals int // number of trailing fields that may be omitted (0 or 1)
}

// NewParser creates a custom Parser with custom options.
//
//	// Standard parser without descriptors
//	specParser := NewParser(Minute | Hour | Dom | Month | Dow)
//	sched, err := specParser.Parse("0 0 15 */3 *")
//
//	// Same as above, just excludes time fields
//	subsParser := NewParser(Dom | Month | Dow)
//	sched, err := specParser.Parse("15 */3 *")
//
//	// Same as above, just makes Dow optional
//	subsParser := NewParser(Dom | Month | DowOptional)
//	sched, err := specParser.Parse("15 */3")
func NewParser(options ParseOption) Parser {
	optionals := 0
	if options&DowOptional > 0 {
		// DowOptional implies Dow; record that one field may be omitted.
		options |= Dow
		optionals++
	}
	return Parser{options, optionals}
}

// Parse returns a new crontab schedule representing the given spec.
// It returns a descriptive error if the spec is not valid.
// It accepts crontab specs and features configured by NewParser.
+func (p Parser) Parse(spec string) (Schedule, error) { + if len(spec) == 0 { + return nil, fmt.Errorf("Empty spec string") + } + if spec[0] == '@' && p.options&Descriptor > 0 { + return parseDescriptor(spec) + } + + // Figure out how many fields we need + max := 0 + for _, place := range places { + if p.options&place > 0 { + max++ + } + } + min := max - p.optionals + + // Split fields on whitespace + fields := strings.Fields(spec) + + // Validate number of fields + if count := len(fields); count < min || count > max { + if min == max { + return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec) + } + return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec) + } + + // Fill in missing fields + fields = expandFields(fields, p.options) + + var err error + field := func(field string, r bounds) uint64 { + if err != nil { + return 0 + } + var bits uint64 + bits, err = getField(field, r) + return bits + } + + var ( + second = field(fields[0], seconds) + minute = field(fields[1], minutes) + hour = field(fields[2], hours) + dayofmonth = field(fields[3], dom) + month = field(fields[4], months) + dayofweek = field(fields[5], dow) + ) + if err != nil { + return nil, err + } + + return &SpecSchedule{ + Second: second, + Minute: minute, + Hour: hour, + Dom: dayofmonth, + Month: month, + Dow: dayofweek, + }, nil +} + +func expandFields(fields []string, options ParseOption) []string { + n := 0 + count := len(fields) + expFields := make([]string, len(places)) + copy(expFields, defaults) + for i, place := range places { + if options&place > 0 { + expFields[i] = fields[n] + n++ + } + if n == count { + break + } + } + return expFields +} + +var standardParser = NewParser( + Minute | Hour | Dom | Month | Dow | Descriptor, +) + +// ParseStandard returns a new crontab schedule representing the given standardSpec +// (https://en.wikipedia.org/wiki/Cron). 
It differs from Parse requiring to always +// pass 5 entries representing: minute, hour, day of month, month and day of week, +// in that order. It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Standard crontab specs, e.g. "* * * * ?" +// - Descriptors, e.g. "@midnight", "@every 1h30m" +func ParseStandard(standardSpec string) (Schedule, error) { + return standardParser.Parse(standardSpec) +} + +var defaultParser = NewParser( + Second | Minute | Hour | Dom | Month | DowOptional | Descriptor, +) + +// Parse returns a new crontab schedule representing the given spec. +// It returns a descriptive error if the spec is not valid. +// +// It accepts +// - Full crontab specs, e.g. "* * * * * ?" +// - Descriptors, e.g. "@midnight", "@every 1h30m" +func Parse(spec string) (Schedule, error) { + return defaultParser.Parse(spec) +} + +// getField returns an Int with the bits set representing all of the times that +// the field represents or error parsing field value. A "field" is a comma-separated +// list of "ranges". +func getField(field string, r bounds) (uint64, error) { + var bits uint64 + ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' }) + for _, expr := range ranges { + bit, err := getRange(expr, r) + if err != nil { + return bits, err + } + bits |= bit + } + return bits, nil +} + +// getRange returns the bits indicated by the given expression: +// number | number "-" number [ "/" number ] +// or error parsing range. +func getRange(expr string, r bounds) (uint64, error) { + var ( + start, end, step uint + rangeAndStep = strings.Split(expr, "/") + lowAndHigh = strings.Split(rangeAndStep[0], "-") + singleDigit = len(lowAndHigh) == 1 + err error + ) + + var extra uint64 + if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" 
{ + start = r.min + end = r.max + extra = starBit + } else { + start, err = parseIntOrName(lowAndHigh[0], r.names) + if err != nil { + return 0, err + } + switch len(lowAndHigh) { + case 1: + end = start + case 2: + end, err = parseIntOrName(lowAndHigh[1], r.names) + if err != nil { + return 0, err + } + default: + return 0, fmt.Errorf("Too many hyphens: %s", expr) + } + } + + switch len(rangeAndStep) { + case 1: + step = 1 + case 2: + step, err = mustParseInt(rangeAndStep[1]) + if err != nil { + return 0, err + } + + // Special handling: "N/step" means "N-max/step". + if singleDigit { + end = r.max + } + default: + return 0, fmt.Errorf("Too many slashes: %s", expr) + } + + if start < r.min { + return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr) + } + if end > r.max { + return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr) + } + if start > end { + return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr) + } + if step == 0 { + return 0, fmt.Errorf("Step of range should be a positive number: %s", expr) + } + + return getBits(start, end, step) | extra, nil +} + +// parseIntOrName returns the (possibly-named) integer contained in expr. +func parseIntOrName(expr string, names map[string]uint) (uint, error) { + if names != nil { + if namedInt, ok := names[strings.ToLower(expr)]; ok { + return namedInt, nil + } + } + return mustParseInt(expr) +} + +// mustParseInt parses the given expression as an int or returns an error. +func mustParseInt(expr string) (uint, error) { + num, err := strconv.Atoi(expr) + if err != nil { + return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err) + } + if num < 0 { + return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr) + } + + return uint(num), nil +} + +// getBits sets all bits in the range [min, max], modulo the given step size. 
+func getBits(min, max, step uint) uint64 { + var bits uint64 + + // If step is 1, use shifts. + if step == 1 { + return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min) + } + + // Else, use a simple loop. + for i := min; i <= max; i += step { + bits |= 1 << i + } + return bits +} + +// all returns all bits within the given bounds. (plus the star bit) +func all(r bounds) uint64 { + return getBits(r.min, r.max, 1) | starBit +} + +// parseDescriptor returns a predefined schedule for the expression, or error if none matches. +func parseDescriptor(descriptor string) (Schedule, error) { + switch descriptor { + case "@yearly", "@annually": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: 1 << months.min, + Dow: all(dow), + }, nil + + case "@monthly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: 1 << dom.min, + Month: all(months), + Dow: all(dow), + }, nil + + case "@weekly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: 1 << dow.min, + }, nil + + case "@daily", "@midnight": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: 1 << hours.min, + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + + case "@hourly": + return &SpecSchedule{ + Second: 1 << seconds.min, + Minute: 1 << minutes.min, + Hour: all(hours), + Dom: all(dom), + Month: all(months), + Dow: all(dow), + }, nil + } + + const every = "@every " + if strings.HasPrefix(descriptor, every) { + duration, err := time.ParseDuration(descriptor[len(every):]) + if err != nil { + return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err) + } + return Every(duration), nil + } + + return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor) +} diff --git a/vendor/github.com/robfig/cron/spec.go 
b/vendor/github.com/robfig/cron/spec.go new file mode 100644 index 0000000..aac9a60 --- /dev/null +++ b/vendor/github.com/robfig/cron/spec.go @@ -0,0 +1,158 @@ +package cron + +import "time" + +// SpecSchedule specifies a duty cycle (to the second granularity), based on a +// traditional crontab specification. It is computed initially and stored as bit sets. +type SpecSchedule struct { + Second, Minute, Hour, Dom, Month, Dow uint64 +} + +// bounds provides a range of acceptable values (plus a map of name to value). +type bounds struct { + min, max uint + names map[string]uint +} + +// The bounds for each field. +var ( + seconds = bounds{0, 59, nil} + minutes = bounds{0, 59, nil} + hours = bounds{0, 23, nil} + dom = bounds{1, 31, nil} + months = bounds{1, 12, map[string]uint{ + "jan": 1, + "feb": 2, + "mar": 3, + "apr": 4, + "may": 5, + "jun": 6, + "jul": 7, + "aug": 8, + "sep": 9, + "oct": 10, + "nov": 11, + "dec": 12, + }} + dow = bounds{0, 6, map[string]uint{ + "sun": 0, + "mon": 1, + "tue": 2, + "wed": 3, + "thu": 4, + "fri": 5, + "sat": 6, + }} +) + +const ( + // Set the top bit if a star was included in the expression. + starBit = 1 << 63 +) + +// Next returns the next time this schedule is activated, greater than the given +// time. If no time can be found to satisfy the schedule, return the zero time. +func (s *SpecSchedule) Next(t time.Time) time.Time { + // General approach: + // For Month, Day, Hour, Minute, Second: + // Check if the time value matches. If yes, continue to the next field. + // If the field doesn't match the schedule, then increment the field until it matches. + // While incrementing the field, a wrap-around brings it back to the beginning + // of the field list (since it is necessary to re-verify previous field + // values) + + // Start at the earliest possible time (the upcoming second). + t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond) + + // This flag indicates whether a field has been incremented. 
+ added := false + + // If no time is found within five years, return zero. + yearLimit := t.Year() + 5 + +WRAP: + if t.Year() > yearLimit { + return time.Time{} + } + + // Find the first applicable month. + // If it's this month, then do nothing. + for 1< 0 + dowMatch bool = 1< 0 + ) + if s.Dom&starBit > 0 || s.Dow&starBit > 0 { + return domMatch && dowMatch + } + return domMatch || dowMatch +}