initial commit

Cadey Ratio 2017-08-18 00:34:41 -07:00
commit 35f0a86c2c
49 changed files with 8496 additions and 0 deletions

1
.dockerignore Normal file
@@ -0,0 +1 @@
.env

16
Dockerfile Normal file
@@ -0,0 +1,16 @@
FROM xena/alpine
RUN apk add go build-base # TODO: move to bottom RUN segment
ADD ./vendor /go/src/git.xeserv.us/xena/mercy/vendor
ADD ./internal /go/src/git.xeserv.us/xena/mercy/internal
ADD ./cmd /go/src/git.xeserv.us/xena/mercy/cmd
ENV GOPATH /go
ENV CGO_ENABLED 0
RUN mkdir -p /go/bin && cd /go/bin \
&& go build git.xeserv.us/xena/mercy/cmd/worker \
&& go build git.xeserv.us/xena/mercy/cmd/kronos \
&& go build git.xeserv.us/xena/mercy/cmd/mercyd
#&& apk del go build-base

19
LICENSE Normal file
@@ -0,0 +1,19 @@
Copyright (c) 2017 Christine Dodrill <me@christine.website>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgement in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.

25
README.md Normal file
@@ -0,0 +1,25 @@
mercy
=====
> The wonders of modern cluster administration
This is basically an experiment in microservices and rqlite, solving a problem
that has been solved many times before; the difference is that this design is
simpler and, in theory, much easier to scale.
## Components
- `worker`
  workers wait for incoming health check work and execute it, returning the results
  to the `results` queue.
- `kronos`
  kronos schedules work and records the results of checks.
- `mercyd`
  mercyd is the gRPC server for control RPC.
- `mercy`
  mercy is a snazzy little command line application for talking to mercyd.
## Rationale
This is a simpler, easier-to-understand implementation of something I am sure
has been written to death.
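The data flow is easiest to see from the wire: kronos marshals an `HTTPCheckRequest` and publishes it on the `tasks:http` NATS queue, a worker runs the check and publishes a `CheckResult` on `results`, and kronos forwards that to the check's webhook. A minimal publish-side sketch, based on the types in `internal/common` (the NATS URL and the IDs are placeholders, not values from this repo):

```go
package main

import (
	"encoding/json"
	"log"
	"time"

	"git.xeserv.us/xena/mercy/internal/common"
	nats "github.com/nats-io/go-nats"
)

func main() {
	nc, err := nats.Connect("nats://localhost:4222") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	chr := common.HTTPCheckRequest{
		Preamble: common.Preamble{CustomerID: "aliens", CheckID: "1", RunID: "run-1"},
		URL:      "https://cetacean.club",
		// slower than DegradedThreshold is degraded; slower than FailThreshold is failed
		DegradedThreshold: 500 * time.Millisecond,
		FailThreshold:     5 * time.Second,
	}
	data, err := json.Marshal(&chr)
	if err != nil {
		log.Fatal(err)
	}
	// any worker in the "worker" queue group picks this up and answers on results
	if err := nc.Publish("tasks:http", data); err != nil {
		log.Fatal(err)
	}
}
```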

140
cmd/kronos/main.go Normal file
@@ -0,0 +1,140 @@
package main
import (
"bytes"
"encoding/json"
"fmt"
"log"
"net/http"
"os"
"strconv"
"time"
"git.xeserv.us/xena/mercy/internal/common"
"git.xeserv.us/xena/mercy/internal/database"
"github.com/Xe/gorqlite"
"github.com/Xe/uuid"
"github.com/caarlos0/env"
nats "github.com/nats-io/go-nats"
"github.com/robfig/cron"
)
type config struct {
NatsURL string `env:"NATS_URL,required"`
DatabaseURL string `env:"DATABASE_URL,required"`
Debug bool `env:"DEBUG"`
}
func main() {
var cfg config
err := env.Parse(&cfg)
if err != nil {
log.Fatal(err)
}
nc, err := common.NatsConnect(cfg.NatsURL)
if err != nil {
log.Fatal(err)
}
log.Printf("connecting to %s", cfg.DatabaseURL)
db, err := common.RQLiteConnect(cfg.DatabaseURL)
if err != nil {
log.Fatal(err)
}
if cfg.Debug {
gorqlite.TraceOn(os.Stderr)
}
chks := database.NewChecks(db)
err = chks.Migrate()
if err != nil {
log.Fatal(err)
}
results := database.NewResults(db)
err = results.Migrate()
if err != nil {
log.Fatal(err)
}
nc.QueueSubscribe("results", "kronos", func(m *nats.Msg) {
var cr common.CheckResult
err := json.Unmarshal(m.Data, &cr)
if err != nil {
log.Printf("results: error when decoding json: %v", err)
return
}
err = results.InsResult(cr)
if err != nil {
log.Printf("results: error when inserting result record: %v", err)
return
}
cid, err := strconv.ParseInt(cr.Preamble.CheckID, 10, 64)
if err != nil {
log.Printf("results: %s is not a number: %v", cr.Preamble.CheckID, err)
return
}
chk, err := chks.GetCheck(cid)
if err != nil {
log.Printf("results: can't get check: %v", err)
return
}
_, err = http.Post(chk.ReportWebhook, "application/json", bytes.NewBuffer(m.Data))
if err != nil {
log.Printf("results: http.Post(%q): %v", chk.ReportWebhook, err)
return
}
err = chks.UpdateLastResult(cid, cr.Result)
if err != nil {
log.Printf("results: updating last check result for cid %d: %v", cid, err)
return
}
})
c := cron.New()
c.AddFunc("@every 1m", func() {
log.Printf("scheduling checks")
cl, err := chks.GetAllChecks()
if err != nil {
log.Printf("getAllChecks: %v", err)
return
}
for _, chk := range cl {
go func(chk common.Check) { // pass the check by value so each goroutine sees its own copy
chr := common.HTTPCheckRequest{
Preamble: common.Preamble{
CustomerID: chk.CustomerID,
CheckID: fmt.Sprintf("%d", chk.ID),
RunID: uuid.New(),
},
URL: chk.URI,
DegradedThreshold: 500 * time.Millisecond,
FailThreshold: 5 * time.Second,
}
data, err := json.Marshal(&chr)
if err != nil {
log.Printf("error in json-encoding check request %#v: %v", chr, err)
return
}
err = nc.Publish("tasks:http", data)
if err != nil {
log.Printf("error in sending nats request checkID: %d: %v", chk.ID, err)
}
}(chk)
}
})
c.Start()
select {} // block forever; cron and the NATS subscription do the work
}

7
cmd/mercyd/main.go Normal file
@@ -0,0 +1,7 @@
package main
func main() {
select {} // placeholder: block forever (the gRPC control server presumably lands here later)
}

80
cmd/worker/main.go Normal file
@@ -0,0 +1,80 @@
package main
import (
"context"
"encoding/json"
"log"
"net/http"
"os"
"time"
"git.xeserv.us/xena/mercy/internal/common"
"github.com/nats-io/go-nats"
)
func main() {
uri := os.Getenv("NATS_URL")
nc, err := common.NatsConnect(uri)
if err != nil {
log.Fatal("Error establishing connection to NATS:", err)
}
log.Printf("Connected to NATS at: %s", nc.ConnectedUrl())
nc.QueueSubscribe("tasks:http", "worker", func(m *nats.Msg) {
var hc common.HTTPCheckRequest
err := json.Unmarshal(m.Data, &hc)
if err != nil {
log.Printf("tasks:http: error when decoding json: %v", err)
return
}
req, err := http.NewRequest(http.MethodGet, hc.URL, nil)
if err != nil {
log.Printf("tasks:http: error when creating request: %v", err)
return
}
ctx, cancel := context.WithTimeout(context.Background(), hc.FailThreshold)
defer cancel()
req = req.WithContext(ctx)
result := common.CheckResult{Preamble: hc.Preamble}
before := time.Now()
resp, err := http.DefaultClient.Do(req)
if err != nil {
// a timeout comes back as a wrapped context error from the transport,
// so treat any transport error as a failed check
log.Printf("tasks:http: error when fetching url %s: %v", hc.URL, err)
result.Result = common.Failed
result.ErrData = err.Error()
}
if resp != nil { // resp is nil whenever Do returns an error
resp.Body.Close()
if resp.StatusCode/100 == 2 {
result.Result = common.OK
}
}
// only downgrade a successful check; a failed check stays failed
if result.Result == common.OK && time.Since(before) > hc.DegradedThreshold {
result.Result = common.Degraded
}
result.Timestamp = time.Now()
data, err := json.Marshal(&result)
if err != nil {
log.Printf("tasks:http: error encoding json for %#v: %v", result, err)
return
}
err = nc.Publish("results", data)
if err != nil {
log.Printf("tasks:http: error publishing results: %v", err)
}
})
select {} // block forever; the NATS subscription handles the work
}

50
docker-compose.yml Normal file
@@ -0,0 +1,50 @@
version: "3"
services:
rqlite:
image: rqlite/rqlite:4.0.2
volumes:
- rqlite:/rqlite/file
command: -on-disk -http-adv-addr rqlite:4001
nats:
image: nats:1.0.2
entrypoint: "/gnatsd -DV" # -DV turns on debug/trace logging for message queue issues; drop it in production
expose:
- "4222"
ports:
- "8222:8222"
worker:
image: xena/mercy:$GIT_COMMIT
environment:
NATS_URL: nats://nats:4222
command: /go/bin/worker
depends_on:
- nats
kronos:
image: xena/mercy:$GIT_COMMIT
depends_on:
- nats
- rqlite
environment:
NATS_URL: nats://nats:4222
DATABASE_URL: http://rqlite:4001
command: /go/bin/kronos
mercyd:
image: xena/mercy:$GIT_COMMIT
environment:
NATS_URL: nats://nats:4222
DATABASE_URL: http://rqlite:4001
command: /go/bin/mercyd
ports:
- "42069:42069"
#- "433:433" # uncomment for production, used for SSL certs via Let's Encrypt
depends_on:
- nats
- rqlite
volumes:
rqlite:

30
internal/common/nats.go Normal file
@@ -0,0 +1,30 @@
package common
import (
"log"
"time"
"github.com/nats-io/go-nats"
)
func NatsConnect(u string) (*nats.Conn, error) {
var (
nc *nats.Conn
err error
)
for i := 0; i < 5; i++ {
nc, err = nats.Connect(u)
if err == nil {
break
}
log.Printf("sleeping 5 seconds to try again, got error connecting to %s: %v", u, err)
time.Sleep(5 * time.Second)
}
if err != nil {
return nil, err
}
return nc, nil
}

27
internal/common/rqlite.go Normal file
@@ -0,0 +1,27 @@
package common
import (
"log"
"time"
"github.com/Xe/gorqlite"
)
func RQLiteConnect(u string) (gorqlite.Connection, error) {
var (
db gorqlite.Connection
err error
)
for i := 0; i < 5; i++ {
db, err = gorqlite.Open(u)
if err == nil {
break
}
log.Printf("sleeping 1 second to try again, got error connecting to %s: %v", u, err)
time.Sleep(time.Second)
}
return db, err
}

43
internal/common/types.go Normal file
@@ -0,0 +1,43 @@
package common
import "time"
// Result is the outcome of a single health check run.
type Result int
const (
Unknown Result = iota
OK
Degraded
Failed
)
// Preamble identifies the customer, check, and run a message belongs to.
type Preamble struct {
CustomerID string `json:"customer_id"`
CheckID string `json:"check_id"`
RunID string `json:"run_id"`
}
// HTTPCheckRequest is published to the tasks:http queue to ask a worker to run an HTTP health check.
type HTTPCheckRequest struct {
Preamble Preamble `json:"preamble"`
URL string `json:"url"`
DegradedThreshold time.Duration `json:"degraded_threshold"`
FailThreshold time.Duration `json:"fail_threshold"`
}
// CheckResult is what a worker reports back on the results queue.
type CheckResult struct {
Preamble Preamble `json:"preamble"`
Timestamp time.Time `json:"timestamp"`
Result Result `json:"result"`
ErrData string `json:"errdata,omitempty"`
}
// Check is a single health check definition as stored in the checks table.
type Check struct {
ID int64 `json:"id"`
Timestamp time.Time `json:"timestamp"`
CustomerID string `json:"customer_id"`
Active bool `json:"active"`
URI string `json:"uri"`
ReportWebhook string `json:"report_webhook"`
LastResult Result `json:"last_result"`
}
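A worked example of the wire format these types produce: `Result` serializes as its integer constant, and since `time.Duration` is an `int64` under the hood, the threshold fields serialize as nanosecond counts. A minimal sketch of the `CheckResult` a webhook receiver would get (values illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
	"time"

	"git.xeserv.us/xena/mercy/internal/common"
)

func main() {
	cr := common.CheckResult{
		Preamble:  common.Preamble{CustomerID: "aliens", CheckID: "1", RunID: "run-1"},
		Timestamp: time.Date(2017, 8, 18, 0, 0, 0, 0, time.UTC),
		Result:    common.OK, // the integer 1; ErrData is omitted when empty
	}
	data, err := json.Marshal(&cr)
	if err != nil {
		panic(err)
	}
	// prints: {"preamble":{"customer_id":"aliens","check_id":"1","run_id":"run-1"},
	//          "timestamp":"2017-08-18T00:00:00Z","result":1}
	fmt.Println(string(data))
}
```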

@@ -0,0 +1 @@
package database

195
internal/database/events.go Normal file
@@ -0,0 +1,195 @@
package database
import (
"database/sql"
"log"
"time"
"git.xeserv.us/xena/mercy/internal/common"
"github.com/Xe/gorqlite"
)
type Results struct {
conn gorqlite.Connection
insResult gorqlite.PreparedStatement
getResultsForCustomer gorqlite.PreparedStatement
}
func NewResults(conn gorqlite.Connection) *Results {
return &Results{
conn: conn,
insResult: gorqlite.NewPreparedStatement("INSERT INTO results (timestamp, customer_id, check_id, run_id, result, error_msg) VALUES (%d, %s, %s, %s, %d, %s)"),
getResultsForCustomer: gorqlite.NewPreparedStatement("SELECT * FROM results WHERE customer_id=%s"),
}
}
func (r *Results) Migrate() error {
ddl := []string{
`CREATE TABLE IF NOT EXISTS results (id INTEGER PRIMARY KEY, timestamp INTEGER, customer_id TEXT NOT NULL, check_id TEXT NOT NULL, run_id TEXT NOT NULL, result INTEGER NOT NULL, error_msg TEXT)`,
}
_, err := r.conn.Write(ddl)
return err
}
func (r *Results) InsResult(cr common.CheckResult) error {
_, err := r.conn.WriteOne(r.insResult.Bind(cr.Timestamp.Unix(), cr.Preamble.CustomerID, cr.Preamble.CheckID, cr.Preamble.RunID, cr.Result, cr.ErrData))
return err
}
func (r *Results) GetResultsForCustomer(cid string) ([]common.CheckResult, error) {
var result []common.CheckResult
res, err := r.conn.QueryOne(r.getResultsForCustomer.Bind(cid))
if err != nil {
return nil, err
}
if res.NumRows() == 0 {
return nil, sql.ErrNoRows
}
for res.Next() {
var cr common.CheckResult
var id int64 // the leading id column has no home in CheckResult
var ts int64
var checkResult int64
err = res.Scan(&id, &ts, &cr.Preamble.CustomerID, &cr.Preamble.CheckID, &cr.Preamble.RunID, &checkResult, &cr.ErrData)
if err != nil {
return nil, err
}
cr.Timestamp = time.Unix(ts, 0)
cr.Result = common.Result(checkResult)
result = append(result, cr)
}
return result, nil
}
type Checks struct {
conn gorqlite.Connection
insCheck gorqlite.PreparedStatement
getCheck gorqlite.PreparedStatement
getAllChecks gorqlite.PreparedStatement
getChecksForCustomer gorqlite.PreparedStatement
activeCheck gorqlite.PreparedStatement
deactiveCheck gorqlite.PreparedStatement
removeCheck gorqlite.PreparedStatement
updateLastRes gorqlite.PreparedStatement
}
func NewChecks(conn gorqlite.Connection) *Checks {
return &Checks{
conn: conn,
insCheck: gorqlite.NewPreparedStatement("INSERT INTO checks (timestamp, customer_id, active, uri, report_webhook, last_result) VALUES (%d, %s, %d, %s, %s, 0)"),
getCheck: gorqlite.NewPreparedStatement("SELECT * FROM checks WHERE id=%d"),
getAllChecks: gorqlite.NewPreparedStatement("SELECT id, timestamp, customer_id, active, uri, report_webhook, last_result FROM checks WHERE active=1"),
getChecksForCustomer: gorqlite.NewPreparedStatement("SELECT * FROM checks WHERE customer_id=%s"),
activeCheck: gorqlite.NewPreparedStatement("UPDATE checks SET active=1 WHERE id=%d"),
deactiveCheck: gorqlite.NewPreparedStatement("UPDATE checks SET active=0 WHERE id=%d"),
removeCheck: gorqlite.NewPreparedStatement("DELETE FROM checks WHERE id=%d"),
updateLastRes: gorqlite.NewPreparedStatement("UPDATE checks SET last_result=%d WHERE id=%d"),
}
}
func (c *Checks) Migrate() error {
ddl := []string{
`CREATE TABLE IF NOT EXISTS checks (id INTEGER PRIMARY KEY, timestamp INTEGER, customer_id TEXT, active INTEGER, uri TEXT, report_webhook TEXT, last_result INTEGER)`,
}
_, err := c.conn.Write(ddl)
return err
}
func (c *Checks) InsertCheck(ch *common.Check) error {
var active int
if ch.Active {
active = 1
}
res, err := c.conn.WriteOne(c.insCheck.Bind(ch.Timestamp.Unix(), ch.CustomerID, active, ch.URI, ch.ReportWebhook))
if err != nil {
return err
}
ch.ID = res.LastInsertID
return nil
}
func (c *Checks) GetAllChecks() ([]common.Check, error) {
var result []common.Check
res, err := c.conn.QueryOne(c.getAllChecks.Bind())
if err != nil {
return nil, err
}
if res.Err != nil {
return nil, res.Err
}
if res.NumRows() == 0 {
return nil, sql.ErrNoRows
}
for res.Next() {
var ch common.Check
var chid int64
var ts int64
var act int64
var lres int64
err = res.Scan(&chid, &ts, &ch.CustomerID, &act, &ch.URI, &ch.ReportWebhook, &lres)
if err != nil {
return nil, err
}
ch.ID = chid
ch.Timestamp = time.Unix(ts, 0)
ch.Active = act == 1
ch.LastResult = common.Result(lres)
result = append(result, ch)
}
return result, nil
}
func (c *Checks) GetCheck(cid int64) (*common.Check, error) {
var ch common.Check
res, err := c.conn.QueryOne(c.getCheck.Bind(cid))
if err != nil {
return nil, err
}
if res.NumRows() == 0 {
return nil, sql.ErrNoRows
}
if !res.Next() {
return nil, sql.ErrNoRows
}
var ts int64
var act int64
var lres int64
err = res.Scan(&ch.ID, &ts, &ch.CustomerID, &act, &ch.URI, &ch.ReportWebhook, &lres)
if err != nil {
return nil, err
}
ch.Timestamp = time.Unix(ts, 0)
ch.Active = act == 1
ch.LastResult = common.Result(lres)
return &ch, nil
}
func (c *Checks) UpdateLastResult(cid int64, result common.Result) error {
_, err := c.conn.WriteOne(c.updateLastRes.Bind(int64(result), cid))
return err
}

@@ -0,0 +1,40 @@
package database
import (
"os"
"testing"
"time"
"git.xeserv.us/xena/mercy/internal/common"
"github.com/Xe/gorqlite"
)
func TestChecks(t *testing.T) {
gorqlite.TraceOn(os.Stderr)
db, err := gorqlite.Open("http://")
if err != nil {
t.Fatal(err)
}
chks := NewChecks(db)
err = chks.Migrate()
if err != nil {
t.Fatal(err)
}
err = chks.InsertCheck(&common.Check{
Timestamp: time.Now(),
CustomerID: "aliens",
Active: true,
URI: "https://cetacean.club",
ReportWebhook: "https://google.com",
})
if err != nil {
t.Fatal(err)
}
_, err = chks.GetAllChecks()
if err != nil {
t.Fatal(err)
}
}

8
vendor-log Normal file
View File

@ -0,0 +1,8 @@
b4479c874d87db74c2049a1b4abd55eb162c70fb github.com/nats-io/go-nats
b4479c874d87db74c2049a1b4abd55eb162c70fb github.com/nats-io/go-nats/encoders/builtin
b4479c874d87db74c2049a1b4abd55eb162c70fb github.com/nats-io/go-nats/util
3cf34f9fca4e88afa9da8eabd75e3326c9941b44 github.com/nats-io/nuid
0cf029d5748c52beb2c9d20c81880cb4bdf8f788 github.com/caarlos0/env
edf576cd33fb6773ed3042186ba689ced21c55b1 github.com/Xe/gorqlite
736158dc09e10f1911ca3a1e1b01f11b566ce5db github.com/robfig/cron
62b230097e9c9534ca2074782b25d738c4b68964 github.com/Xe/uuid

202
vendor/github.com/Xe/gorqlite/api.go generated vendored Normal file
@@ -0,0 +1,202 @@
package gorqlite
/*
this file has low level stuff:
rqliteApiGet()
rqliteApiPost()
There is some code duplication between those and they should
probably be combined into one function.
nothing public here.
*/
import "bytes"
import "encoding/json"
import "errors"
import "fmt"
import "io/ioutil"
import "net/http"
import "time"
/* *****************************************************************
method: rqliteApiGet() - for api_STATUS
- lowest level interface - does not do any JSON unmarshaling
- handles retries
- handles timeouts
* *****************************************************************/
func (conn *Connection) rqliteApiGet(apiOp apiOperation) ([]byte, error) {
var responseBody []byte
trace("%s: rqliteApiGet() called",conn.ID)
// only api_STATUS now - maybe someday BACKUP
if ( apiOp != api_STATUS ) {
return responseBody, errors.New("rqliteApiGet() called for invalid api operation")
}
// just to be safe, check this
peersToTry := conn.cluster.makePeerList()
if ( len(peersToTry) < 1 ) {
return responseBody, errors.New("I don't have any cluster info")
}
trace("%s: I have a peer list %d peers long",conn.ID,len(peersToTry))
// failure log is used so that if all peers fail, we can say something
// about why each failed
failureLog := make([]string,0)
PeerLoop:
for peerNum, peerToTry := range peersToTry {
trace("%s: attemping to contact peer %d",conn.ID,peerNum)
// docs say default GET policy is up to 10 follows automatically
url := conn.assembleURL(api_STATUS,peerToTry)
req, err := http.NewRequest("GET",url,nil)
if ( err != nil ) {
trace("%s: got error '%s' doing http.NewRequest", conn.ID,err.Error())
failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error()))
continue PeerLoop
}
trace("%s: http.NewRequest() OK")
req.Header.Set("Content-Type","application/json")
client := &http.Client{}
client.Timeout = time.Duration(conn.timeout) * time.Second
response, err := client.Do(req)
if ( err != nil ) {
trace("%s: got error '%s' doing client.Do", conn.ID,err.Error())
failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error()))
continue PeerLoop
}
defer response.Body.Close()
trace("%s: client.Do() OK")
responseBody, err := ioutil.ReadAll(response.Body)
if ( err != nil ) {
trace("%s: got error '%s' doing ioutil.ReadAll", conn.ID,err.Error())
failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error()))
continue PeerLoop
}
trace("%s: ioutil.ReadAll() OK")
if ( response.Status != "200 OK" ) {
trace("%s: got code %s",conn.ID,response.Status)
failureLog = append(failureLog,fmt.Sprintf("%s failed, got: %s",url,response.Status))
continue PeerLoop
}
// if we got here, we succeeded
trace("%s: api call OK, returning",conn.ID)
return responseBody, nil
}
// if we got here, all peers failed. Let's build a verbose error message
var stringBuffer bytes.Buffer
stringBuffer.WriteString("tried all peers unsuccessfully. here are the results:\n")
for n, v := range failureLog {
stringBuffer.WriteString(fmt.Sprintf(" peer #%d: %s\n",n,v))
}
return responseBody, errors.New(stringBuffer.String())
}
/* *****************************************************************
method: rqliteApiPost() - for api_QUERY and api_WRITE
- lowest level interface - does not do any JSON unmarshaling
- handles 301s, etc.
- handles retries
- handles timeouts
it is called with an apiOperation type because the URL it will use varies
depending on the API operation type (api_QUERY vs. api_WRITE)
* *****************************************************************/
func (conn *Connection) rqliteApiPost (apiOp apiOperation, sqlStatements []string) ([]byte, error) {
var responseBody []byte
switch (apiOp) {
case api_QUERY:
trace("%s: rqliteApiGet() post called for a QUERY of %d statements",conn.ID,len(sqlStatements))
case api_WRITE:
trace("%s: rqliteApiGet() post called for a QUERY of %d statements",conn.ID,len(sqlStatements))
default:
return responseBody, errors.New("weird! called for an invalid apiOperation in rqliteApiPost()")
}
// jsonify the statements. not really needed in the
// case of api_STATUS but doesn't hurt
jStatements , err := json.Marshal(sqlStatements)
if ( err != nil ) { return nil, err }
// just to be safe, check this
peersToTry := conn.cluster.makePeerList()
if ( len(peersToTry) < 1 ) {
return responseBody, errors.New("I don't have any cluster info")
}
// failure log is used so that if all peers fail, we can say something
// about why each failed
failureLog := make([]string,0)
PeerLoop:
for peerNum, peer := range peersToTry {
trace("%s: trying peer #%d",conn.ID,peerNum)
// we're doing a post, and the RFCs say that if you get a 301, it's not
// automatically followed, so we have to do that ourselves
responseStatus := "Haven't Tried Yet"
var url string
for ( responseStatus == "Haven't Tried Yet" || responseStatus == "301 Moved Permanently" ) {
url = conn.assembleURL(apiOp,peer)
req, err := http.NewRequest("POST",url,bytes.NewBuffer(jStatements))
if ( err != nil ) {
trace("%s: got error '%s' doing http.NewRequest", conn.ID,err.Error())
failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error()))
continue PeerLoop
}
req.Header.Set("Content-Type","application/json")
client := &http.Client{}
response, err := client.Do(req)
if ( err != nil ) {
trace("%s: got error '%s' doing client.Do", conn.ID,err.Error())
failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error()))
continue PeerLoop
}
defer response.Body.Close()
responseBody, err = ioutil.ReadAll(response.Body)
if ( err != nil ) {
trace("%s: got error '%s' doing ioutil.ReadAll", conn.ID,err.Error())
failureLog = append(failureLog,fmt.Sprintf("%s failed due to %s",url,err.Error()))
continue PeerLoop
}
responseStatus = response.Status
if ( responseStatus == "301 Moved Permanently" ) {
v := response.Header["Location"]
failureLog = append(failureLog,fmt.Sprintf("%s redirected me to %s",url,v[0]))
url = v[0]
continue PeerLoop
} else if ( responseStatus == "200 OK" ) {
trace("%s: api call OK, returning",conn.ID)
return responseBody, nil
} else {
trace("%s: got error in responseStatus: %s", conn.ID, responseStatus)
failureLog = append(failureLog,fmt.Sprintf("%s failed, got: %s",url,response.Status))
continue PeerLoop
}
}
}
// if we got here, all peers failed. Let's build a verbose error message
var stringBuffer bytes.Buffer
stringBuffer.WriteString("tried all peers unsuccessfully. here are the results:\n")
for n, v := range failureLog {
stringBuffer.WriteString(fmt.Sprintf(" peer #%d: %s\n",n,v))
}
return responseBody, errors.New(stringBuffer.String())
}

221
vendor/github.com/Xe/gorqlite/cluster.go generated vendored Normal file
@@ -0,0 +1,221 @@
package gorqlite
/*
this file holds most of the cluster-related stuff:
types:
peer
rqliteCluster
Connection methods:
assembleURL (from a peer)
updateClusterInfo (does the full cluster discovery via status)
*/
/* *****************************************************************
imports
* *****************************************************************/
import "bytes"
import "encoding/json"
import "errors"
import "fmt"
import "strings"
//import "os"
//import "reflect"
/* *****************************************************************
type: peer
this is an internal type to abstract peer info.
note that hostname is sometimes used for "has this struct been
initialized" checks.
* *****************************************************************/
type peer struct {
hostname string // hostname or "localhost"
port string // "4001" or port, only ever used as a string
}
func (p *peer) String() string {
return fmt.Sprintf("%s:%s",p.hostname,p.port)
}
/* *****************************************************************
type: rqliteCluster
internal type that abstracts the full cluster state (leader, peers)
* *****************************************************************/
type rqliteCluster struct {
leader peer
otherPeers []peer
conn *Connection
}
/* *****************************************************************
method: rqliteCluster.makePeerList()
in the api calls, we'll want to try the leader first, then the other
peers. to make looping easy, this function returns a list of peers
in the order to try them: leader, other peer, other peer, etc.
* *****************************************************************/
func (rc *rqliteCluster) makePeerList() []peer {
trace("%s: makePeerList() called",rc.conn.ID)
var peerList []peer
peerList = append(peerList,rc.leader)
for _, p := range rc.otherPeers {
peerList = append(peerList,p)
}
trace("%s: makePeerList() returning this list:",rc.conn.ID)
for n, v := range peerList {
trace("%s: makePeerList() peer %d -> %s",rc.conn.ID,n,v.hostname + ":" + v.port)
}
return peerList
}
/* *****************************************************************
method: Connection.assembleURL()
tell it what peer to talk to and what kind of API operation you're
making, and it will return the full URL, from start to finish.
e.g.:
https://mary:secret2@server1.example.com:1234/db/query?transaction&level=strong
note: this func needs to live at the Connection level because the
Connection holds the username, password, consistencyLevel, etc.
* *****************************************************************/
func (conn *Connection) assembleURL(apiOp apiOperation, p peer) string {
var stringBuffer bytes.Buffer
if ( conn.wantsHTTPS == true ) {
stringBuffer.WriteString("https")
} else {
stringBuffer.WriteString("http")
}
stringBuffer.WriteString("://")
if ( conn.username != "" && conn.password != "" ) {
stringBuffer.WriteString(conn.username)
stringBuffer.WriteString(":")
stringBuffer.WriteString(conn.password)
stringBuffer.WriteString("@")
}
stringBuffer.WriteString(p.hostname)
stringBuffer.WriteString(":")
stringBuffer.WriteString(p.port)
switch apiOp {
case api_STATUS:
stringBuffer.WriteString("/status")
case api_QUERY:
stringBuffer.WriteString("/db/query")
case api_WRITE:
stringBuffer.WriteString("/db/execute")
}
if ( apiOp == api_QUERY || apiOp == api_WRITE ) {
stringBuffer.WriteString("?timings&transaction&level=")
stringBuffer.WriteString(consistencyLevelNames[conn.consistencyLevel])
}
switch apiOp {
case api_QUERY:
trace("%s: assembled URL for an api_QUERY: %s",conn.ID,stringBuffer.String())
case api_STATUS:
trace("%s: assembled URL for an api_STATUS: %s",conn.ID,stringBuffer.String())
case api_WRITE:
trace("%s: assembled URL for an api_WRITE: %s",conn.ID,stringBuffer.String())
}
return stringBuffer.String()
}
/* *****************************************************************
method: Connection.updateClusterInfo()
upon invocation, updateClusterInfo() completely erases and refreshes
the Connection's cluster info, replacing its rqliteCluster object
with current info.
the web heavy lifting (retrying, etc.) is done in rqliteApiGet()
* *****************************************************************/
func (conn *Connection) updateClusterInfo() error {
trace("%s: updateClusterInfo() called",conn.ID)
// start with a fresh new cluster
var rc rqliteCluster
rc.conn = conn
responseBody, err := conn.rqliteApiGet(api_STATUS)
if ( err != nil ) { return err }
trace("%s: updateClusterInfo() back from api call OK",conn.ID)
sections := make(map[string]interface{})
err = json.Unmarshal(responseBody,&sections)
if ( err != nil ) { return err }
sMap := sections["store"].(map[string]interface{})
leaderRaftAddr := sMap["leader"].(string)
trace("%s: leader from store section is %s",conn.ID,leaderRaftAddr)
// leader in this case is the RAFT address
// we want the HTTP address, so we'll use this as
// a key as we sift through APIPeers
meta := sMap["meta"].(map[string]interface{})
apiPeers := meta["APIPeers"].(map[string]interface{})
for raftAddr, httpAddr := range apiPeers {
trace("%s: examining httpAddr %s",conn.ID,httpAddr)
/* httpAddr are usually hostname:port */
var p peer
parts := strings.Split(httpAddr.(string),":")
p.hostname = parts[0]
p.port = parts[1]
// so is this the leader?
if ( leaderRaftAddr == raftAddr ) {
trace ("%s: found leader at %s",conn.ID,httpAddr)
rc.leader = p
} else {
rc.otherPeers = append(rc.otherPeers, p)
}
}
if ( rc.leader.hostname == "" ) {
return errors.New("could not determine leader from API status call")
}
// dump to trace
trace("%s: here is my cluster config:",conn.ID)
trace("%s: leader : %s",conn.ID,rc.leader.String())
for n, v := range rc.otherPeers {
trace("%s: otherPeer #%d: %s",conn.ID,n,v.String())
}
// now make it official
conn.cluster = rc
return nil
}

303
vendor/github.com/Xe/gorqlite/conn.go generated vendored Normal file
@@ -0,0 +1,303 @@
package gorqlite
/*
this file contains some high-level Connection-oriented stuff
*/
/* *****************************************************************
imports
* *****************************************************************/
import "errors"
import "fmt"
import "io"
import "net"
import nurl "net/url"
import "strings"
var errClosed = errors.New("gorqlite: connection is closed")
var traceOut io.Writer
// defaults to false. This is used in trace() to quickly
// return if tracing is off, so that we don't do a perhaps
// expensive Sprintf() call only to send it to Discard
var wantsTrace bool
/* *****************************************************************
type: Connection
* *****************************************************************/
/*
The connection abstraction. Note that since rqlite is stateless,
there really is no "connection". However, this type holds
information such as the current leader, peers, connection
string to build URLs, etc.
Connections are assigned a "connection ID" which is a pseudo-UUID
for connection identification in trace output only. This helps
sort out what's going on if you have multiple connections going
at once. It's generated using a non-standards-or-anything-else-compliant
function that uses crypto/rand to generate 16 random bytes.
Note that the Connection object holds info on all peers, gathered
at time of Open() from the node specified.
*/
type Connection struct {
cluster rqliteCluster
/*
name type default
*/
username string // username or ""
password string // password or ""
consistencyLevel consistencyLevel // WEAK
wantsHTTPS bool // false unless connection URL is https
// variables below this line need to be initialized in Open()
timeout int // 10
hasBeenClosed bool // false
ID string // generated in init()
}
/* *****************************************************************
method: Connection.Close()
* *****************************************************************/
func (conn *Connection) Close() {
conn.hasBeenClosed = true
trace("%s: %s",conn.ID,"closing connection")
}
/* *****************************************************************
method: Connection.ConsistencyLevel()
* *****************************************************************/
func (conn *Connection) ConsistencyLevel() (string, error) {
if ( conn.hasBeenClosed) {
return "", errClosed
}
return consistencyLevelNames[conn.consistencyLevel], nil
}
/* *****************************************************************
method: Connection.Leader()
* *****************************************************************/
func (conn *Connection) Leader() (string, error) {
if ( conn.hasBeenClosed) {
return "", errClosed
}
trace("%s: Leader(), calling updateClusterInfo()",conn.ID)
err := conn.updateClusterInfo()
if ( err != nil ) {
trace("%s: Leader() got error from updateClusterInfo(): %s",conn.ID,err.Error())
return "", err
} else {
trace("%s: Leader(), updateClusterInfo() OK",conn.ID)
}
return conn.cluster.leader.String(), nil
}
/* *****************************************************************
method: Connection.Peers()
* *****************************************************************/
func (conn *Connection) Peers() ([]string, error) {
if ( conn.hasBeenClosed) {
var ans []string
return ans, errClosed
}
plist := make ([]string,0)
trace("%s: Peers(), calling updateClusterInfo()",conn.ID)
err := conn.updateClusterInfo()
if ( err != nil ) {
trace("%s: Peers() got error from updateClusterInfo(): %s",conn.ID,err.Error())
return plist, err
} else {
trace("%s: Peers(), updateClusterInfo() OK",conn.ID)
}
plist = append(plist,conn.cluster.leader.String())
for _, p := range conn.cluster.otherPeers {
plist = append(plist,p.String())
}
return plist, nil
}
/* *****************************************************************
method: Connection.SetConsistencyLevel()
* *****************************************************************/
func (conn *Connection) SetConsistencyLevel(levelDesired string) error {
if ( conn.hasBeenClosed) {
return errClosed
}
_, ok := consistencyLevels[levelDesired]
if ( ok ) {
conn.consistencyLevel = consistencyLevels[levelDesired]
return nil
}
return errors.New(fmt.Sprintf("unknown consistency level: %s",levelDesired))
}
/* *****************************************************************
method: Connection.initConnection()
* *****************************************************************/
/*
initConnection takes the initial connection URL specified by
the user, and parses it into a peer. This peer is assumed to
be the leader. The next thing Open() does is updateClusterInfo()
so the truth will be revealed soon enough.
initConnection() does not talk to rqlite. It only parses the
connection URL and prepares the new connection for work.
URL format:
http[s]://${USER}:${PASSWORD}@${HOSTNAME}:${PORT}/db?[OPTIONS]
Examples:
https://mary:secret2@localhost:4001/db
https://mary:secret2@server1.example.com:4001/db?level=none
https://mary:secret2@server2.example.com:4001/db?level=weak
https://mary:secret2@localhost:2265/db?level=strong
to use default connection to localhost:4001 with no auth:
http://
https://
guaranteed map fields - will be set to "" if not specified
field name default if not specified
username ""
password ""
hostname "localhost"
port "4001"
consistencyLevel "weak"
*/
func (conn *Connection) initConnection(url string) error {
// do some sanity checks. You know users.
if ( len(url) < 7 ) {
return errors.New("url specified is impossibly short")
}
if ( strings.HasPrefix(url,"http") == false ) {
return errors.New("url does not start with 'http'")
}
u, err := nurl.Parse(url)
if ( err != nil ) {
return err
}
trace("%s: net.url.Parse() OK",conn.ID)
if ( u.Scheme == "https" ) {
conn.wantsHTTPS = true
}
// specs say Username() is always populated even if empty
if u.User == nil {
conn.username = ""
conn.password = ""
} else {
// guaranteed, but could be empty which is ok
conn.username = u.User.Username()
// not guaranteed, so test if set
pass, isset := u.User.Password()
if ( isset ) {
conn.password = pass
} else {
conn.password = ""
}
}
if ( u.Host == "" ) {
conn.cluster.leader.hostname = "localhost"
} else {
conn.cluster.leader.hostname = u.Host
}
if ( u.Host == "" ) {
conn.cluster.leader.hostname = "localhost"
conn.cluster.leader.port = "4001"
} else {
// SplitHostPort() should only return an error if there is no host port.
// I think.
h, p, err := net.SplitHostPort(u.Host)
if ( err != nil ) {
conn.cluster.leader.hostname = u.Host
} else {
conn.cluster.leader.hostname = h
conn.cluster.leader.port = p
}
}
/*
at the moment, the only allowed query is "level=" with
the desired consistency level
*/
// default
conn.consistencyLevel = cl_WEAK
if ( u.RawQuery != "" ) {
if ( u.RawQuery == "level=weak") {
// that's ok but nothing to do
} else if ( u.RawQuery == "level=strong" ) {
conn.consistencyLevel = cl_STRONG
} else if ( u.RawQuery == "level=none" ) { // the fools!
conn.consistencyLevel = cl_NONE
} else {
return errors.New("don't know what to do with this query: " + u.RawQuery)
}
}
trace("%s: parseDefaultPeer() is done:",conn.ID)
if ( conn.wantsHTTPS == true ) {
trace("%s: %s -> %s",conn.ID,"wants https?","yes")
} else {
trace("%s: %s -> %s",conn.ID,"wants https?","no")
}
trace("%s: %s -> %s",conn.ID,"username",conn.username)
trace("%s: %s -> %s",conn.ID,"password",conn.password)
trace("%s: %s -> %s",conn.ID,"hostname",conn.cluster.leader.hostname)
trace("%s: %s -> %s",conn.ID,"port",conn.cluster.leader.port)
trace("%s: %s -> %s",conn.ID,"consistencyLevel",consistencyLevelNames[conn.consistencyLevel])
conn.cluster.conn = conn
return nil
}

187
vendor/github.com/Xe/gorqlite/gorqlite.go generated vendored Normal file
@@ -0,0 +1,187 @@
/*
gorqlite
A golang database/sql driver for rqlite, the distributed consistent sqlite.
Copyright (c)2016 andrew fabbro (andrew@fabbro.org)
See LICENSE.md for license. tl;dr: MIT. Conveniently, the same license as rqlite.
Project home page: https://github.com/raindo308/gorqlite
Learn more about rqlite at: https://github.com/rqlite/rqlite
*/
package gorqlite
/*
this file contains package-level stuff:
consts
init()
Open, TraceOn(), TraceOff()
*/
import "crypto/rand"
import "fmt"
import "io"
import "io/ioutil"
import "strings"
/* *****************************************************************
const
* *****************************************************************/
type consistencyLevel int
const (
cl_NONE consistencyLevel = iota
cl_WEAK
cl_STRONG
)
// used in several places, actually
var consistencyLevelNames map[consistencyLevel]string
var consistencyLevels map[string]consistencyLevel
type apiOperation int
const (
api_QUERY apiOperation = iota
api_STATUS
api_WRITE
)
/* *****************************************************************
init()
* *****************************************************************/
func init() {
traceOut = ioutil.Discard
consistencyLevelNames = make(map[consistencyLevel]string)
consistencyLevelNames[cl_NONE] = "none"
consistencyLevelNames[cl_WEAK] = "weak"
consistencyLevelNames[cl_STRONG] = "strong"
consistencyLevels = make(map[string]consistencyLevel)
consistencyLevels["none"] = cl_NONE
consistencyLevels["weak"] = cl_WEAK
consistencyLevels["strong"] = cl_STRONG
}
/* *****************************************************************
Open() creates and returns a "connection" to rqlite.
Since rqlite is stateless, there is no actual connection. Open() creates and initializes a gorqlite Connection type, which represents various config information.
The URL should be in a form like this:
http://localhost:4001
http:// default, no auth, localhost:4001
https:// default, no auth, localhost:4001, using https
http://localhost:1234
http://mary:secret2@localhost:1234
https://mary:secret2@somewhere.example.com:1234
https://mary:secret2@somewhere.example.com // will use 4001
* *****************************************************************/
func Open(connURL string) (Connection, error) {
var conn Connection
// generate our uuid for trace
b := make([]byte, 16)
_, err := rand.Read(b)
if err != nil {
return conn, err
}
conn.ID = fmt.Sprintf("%X-%X-%X-%X-%X", b[0:4], b[4:6], b[6:8], b[8:10], b[10:])
trace("%s: Open() called for url: %s",conn.ID,connURL)
// set defaults
conn.timeout = 10
conn.hasBeenClosed = false
// parse the URL given
err = conn.initConnection(connURL)
if ( err != nil ) {
return conn, err
}
// call updateClusterInfo() to populate the cluster
// also tests the user's default
err = conn.updateClusterInfo()
// and the err from updateClusterInfo() will be our err as well
return conn, err
}
/* *****************************************************************
func: trace()
adds a message to the trace output
not a public function. we (inside) can add - outside they can
only see.
Call trace as: Sprintf pattern , args...
This is done so that the more expensive Sprintf() stuff is
done only if truly needed. When tracing is off, calls to
trace() just hit a bool check and return. If tracing is on,
then the Sprintfing is done at a leisurely pace because, well,
we're tracing.
Premature optimization is the root of all evil, so this is
probably sinful behavior.
Don't put a \n in your Sprintf pattern becuase trace() adds one
* *****************************************************************/
func trace(pattern string, args ...interface{}) {
// don't do the probably expensive Sprintf() if not needed
if ( wantsTrace == false ) {
return
}
// this could all be made into one long statement but we have
// compilers to do such things for us. let's sip a mint julep
// and spell this out in glorious exposition.
// make sure there is one and only one newline
nlPattern := strings.TrimSpace(pattern) + "\n"
msg := fmt.Sprintf(nlPattern,args...)
traceOut.Write( []byte( msg ) )
}
/*
TraceOn()
Turns on tracing output to the io.Writer of your choice.
Trace output is very detailed and verbose, as you might expect.
Normally, you should run with tracing off, as it makes absolutely
no concession to performance and is intended for debugging/dev use.
*/
func TraceOn(w io.Writer) {
traceOut = w
wantsTrace = true
}
/*
TraceOff()
Turns off tracing output. Once you call TraceOff(), no further
info is sent to the io.Writer, unless it is TraceOn'd again.
*/
func TraceOff() {
wantsTrace = false
traceOut = ioutil.Discard
}
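A minimal sketch of opening a connection with tracing enabled; the URL is a placeholder for wherever your rqlite leader listens:

```go
package main

import (
	"fmt"
	"log"
	"os"

	"github.com/Xe/gorqlite"
)

func main() {
	gorqlite.TraceOn(os.Stderr) // verbose; leave off outside of debugging
	conn, err := gorqlite.Open("http://localhost:4001/db?level=weak") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	leader, err := conn.Leader() // refreshes cluster info and reports the leader
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("leader is", leader)
}
```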

54
vendor/github.com/Xe/gorqlite/prepared_statement.go generated vendored Normal file
@@ -0,0 +1,54 @@
package gorqlite
import (
"fmt"
"strings"
)
// EscapeString sql-escapes a string.
func EscapeString(value string) string {
replace := [][2]string{
{`\`, `\\`},
{`\0`, `\\0`},
{`\n`, `\\n`},
{`\r`, `\\r`},
{`"`, `\"`},
{`'`, `\'`},
}
for _, val := range replace {
value = strings.Replace(value, val[0], val[1], -1)
}
return value
}
// PreparedStatement is a simple wrapper around fmt.Sprintf for prepared SQL
// statements.
type PreparedStatement struct {
body string
}
// NewPreparedStatement takes a sprintf syntax SQL query for later binding of
// parameters.
func NewPreparedStatement(body string) PreparedStatement {
return PreparedStatement{body: body}
}
// Bind takes arguments and SQL-escapes them, then calling fmt.Sprintf.
func (p PreparedStatement) Bind(args ...interface{}) string {
var spargs []interface{}
for _, arg := range args {
switch arg.(type) {
case string:
spargs = append(spargs, `'`+EscapeString(arg.(string))+`'`)
case fmt.Stringer:
spargs = append(spargs, `'`+EscapeString(arg.(fmt.Stringer).String())+`'`)
default:
spargs = append(spargs, arg)
}
}
return fmt.Sprintf(p.body, spargs...)
}
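A small usage sketch (the statement text is illustrative): strings are escaped and single-quoted before substitution, while numbers pass straight through `%d`:

```go
package main

import (
	"fmt"

	"github.com/Xe/gorqlite"
)

func main() {
	stmt := gorqlite.NewPreparedStatement("SELECT * FROM checks WHERE customer_id=%s AND id=%d")
	// the string argument is SQL-escaped and quoted; the integer is substituted as-is
	fmt.Println(stmt.Bind("o'brien", 42))
	// prints: SELECT * FROM checks WHERE customer_id='o\'brien' AND id=42
}
```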

396
vendor/github.com/Xe/gorqlite/query.go generated vendored Normal file
@@ -0,0 +1,396 @@
package gorqlite
import "errors"
import "fmt"
import "encoding/json"
/* *****************************************************************
method: Connection.Query()
This is the JSON we get back:
{
"results": [
{
"columns": [
"id",
"name"
],
"types": [
"integer",
"text"
],
"values": [
[
1,
"fiona"
],
[
2,
"sinead"
]
],
"time": 0.0150043
}
],
"time": 0.0220043
}
or
{
"results": [
{
"columns": [
"id",
"name"
],
"types": [
"number",
"text"
],
"values": [
[
null,
"Hulk"
]
],
"time": 4.8958e-05
},
{
"columns": [
"id",
"name"
],
"types": [
"number",
"text"
],
"time": 1.8460000000000003e-05
}
],
"time": 0.000134776
}
or
{
"results": [
{
"error": "near \"nonsense\": syntax error"
}
],
"time": 2.478862
}
* *****************************************************************/
/*
QueryOne() is a convenience method that wraps Query() into a single-statement
method.
*/
func (conn *Connection) QueryOne(sqlStatement string) (qr QueryResult, err error) {
if ( conn.hasBeenClosed) {
qr.Err = errClosed
return qr, errClosed
}
sqlStatements := make([]string,0)
sqlStatements = append(sqlStatements,sqlStatement)
qra, err := conn.Query(sqlStatements)
return qra[0], err
}
/*
Query() is used to perform SELECT operations in the database.
It takes an array of SQL statements and executes them in a single transaction, returning an array of QueryResult vars.
*/
func (conn *Connection) Query(sqlStatements []string) (results []QueryResult, err error) {
results = make([]QueryResult,0)
if ( conn.hasBeenClosed) {
var errResult QueryResult
errResult.Err = errClosed
results = append(results,errResult)
return results, errClosed
}
trace("%s: Query() for %d statements",conn.ID,len(sqlStatements))
// if we get an error POSTing, that's a showstopper
response, err := conn.rqliteApiPost(api_QUERY,sqlStatements)
if ( err != nil ) {
trace("%s: rqliteApiCall() ERROR: %s",conn.ID,err.Error())
var errResult QueryResult
errResult.Err = err
results = append(results,errResult)
return results, err
}
trace("%s: rqliteApiCall() OK",conn.ID)
// if we get an error Unmarshaling, that's a showstopper
var sections map[string]interface{}
err = json.Unmarshal(response,&sections)
if ( err != nil ) {
trace("%s: json.Unmarshal() ERROR: %s",conn.ID,err.Error())
var errResult QueryResult
errResult.Err = err
results = append(results,errResult)
return results, err
}
/*
at this point, we have a "results" section and
a "time" section. we can igore the latter.
*/
resultsArray := sections["results"].([]interface{})
trace("%s: I have %d result(s) to parse",conn.ID,len(resultsArray))
numStatementErrors := 0
for n, r := range resultsArray {
trace("%s: parsing result %d",conn.ID,n)
var thisQR QueryResult
thisQR.conn = conn
// r is a hash with columns, types, values, and time
thisResult := r.(map[string]interface{})
// did we get an error?
_, ok := thisResult["error"]
if ok {
trace("%s: have an error on this result: %s",conn.ID,thisResult["error"].(string))
thisQR.Err = errors.New(thisResult["error"].(string))
results = append(results,thisQR)
numStatementErrors++
continue
}
// time is a float64
thisQR.Timing = thisResult["time"].(float64)
// column & type are an array of strings
c := thisResult["columns"].([]interface{})
t := thisResult["types"].([]interface{})
for i := 0; i < len(c) ; i++ {
thisQR.columns = append(thisQR.columns,c[i].(string))
thisQR.types = append(thisQR.types,t[i].(string))
}
// and values are an array of arrays
if ( thisResult["values"] != nil ) {
thisQR.values = thisResult["values"].([]interface{})
} else {
trace("%s: fyi, no values this query",conn.ID)
}
thisQR.rowNumber = -1
trace("%s: this result (#col,time) %d %f",conn.ID,len(thisQR.columns),thisQR.Timing)
results = append(results,thisQR)
}
trace("%s: finished parsing, returning %d results",conn.ID,len(results))
if ( numStatementErrors > 0 ) {
return results, errors.New(fmt.Sprintf("there were %d statement errors",numStatementErrors))
} else {
return results, nil
}
}
/* *****************************************************************
type: QueryResult
* *****************************************************************/
/*
A QueryResult type holds the results of a call to Query(). You could think of it as a rowset.
So if you were to query:
SELECT id, name FROM some_table;
then a QueryResult would hold any errors from that query, a list of columns and types, and the actual row values.
Query() returns an array of QueryResult vars, while QueryOne() returns a single variable.
*/
type QueryResult struct {
conn *Connection
Err error
columns []string
types []string
Timing float64
values []interface{}
rowNumber int64
}
// these are done as getters rather than as public
// variables to prevent monkey business by the user
// that would put us in an inconsistent state
/* *****************************************************************
method: QueryResult.Columns()
* *****************************************************************/
/*
Columns returns a list of the column names for this QueryResult.
*/
func (qr *QueryResult) Columns() []string {
return qr.columns
}
/* *****************************************************************
method: QueryResult.Map()
* *****************************************************************/
/*
Map() returns the current row (as advanced by Next()) as a map[string]interface{}
The key is a string corresponding to a column name.
The value is the corresponding column.
Note that only json values are supported, so you will need to type the interface{} accordingly.
*/
func (qr *QueryResult) Map() (map[string]interface{}, error) {
trace("%s: Map() called for row %d", qr.conn.ID, qr.rowNumber)
ans := make(map[string]interface{})
if ( qr.rowNumber == -1 ) {
return ans, errors.New("you need to Next() before you Map(), sorry, it's complicated")
}
thisRowValues := qr.values[qr.rowNumber].([]interface {})
for i := 0; i<len(qr.columns); i++ {
ans[qr.columns[i]] = thisRowValues[i]
}
return ans, nil
}
/* *****************************************************************
method: QueryResult.Next()
* *****************************************************************/
/*
Next() positions the QueryResult result pointer so that Scan() or Map() is ready.
You should call Next() first, but gorqlite will fix it if you call Map() or Scan() before
the initial Next().
A common idiom:
rows, err := conn.QueryOne(something)
for rows.Next() {
// your Scan/Map and processing here.
}
*/
func (qr *QueryResult) Next() bool {
if ( qr.rowNumber >= int64(len(qr.values) - 1 )) {
return false
}
qr.rowNumber += 1
return true
}
/* *****************************************************************
method: QueryResult.NumRows()
* *****************************************************************/
/*
NumRows() returns the number of rows returned by the query.
*/
func (qr *QueryResult) NumRows() int64 {
return int64(len(qr.values))
}
/* *****************************************************************
method: QueryResult.RowNumber()
* *****************************************************************/
/*
RowNumber() returns the current row number as Next() iterates through the result's rows.
*/
func (qr *QueryResult) RowNumber() int64 {
return qr.rowNumber
}
/* *****************************************************************
method: QueryResult.Scan()
* *****************************************************************/
/*
Scan() takes a list of pointers and then updates them to reflect the current row's data.
Note that only the following data types are used, and they
are a subset of the types JSON uses:
string, for JSON strings
float64, for JSON numbers
int64, as a convenient extension
nil for JSON null
booleans, JSON arrays, and JSON objects are not supported,
since sqlite does not support them.
*/
func (qr *QueryResult) Scan(dest... interface{}) error {
trace("%s: Scan() called for %d vars", qr.conn.ID,len(dest))
if ( qr.rowNumber == -1 ) {
return errors.New("you need to Next() before you Scan(), sorry, it's complicated")
}
if ( len(dest) != len(qr.columns) ) {
return errors.New(fmt.Sprintf("expected %d columns but got %d vars\n", len(qr.columns), len(dest)))
}
thisRowValues := qr.values[qr.rowNumber].([]interface {})
for n, d := range dest {
switch d.(type) {
case *int64:
f := int64(thisRowValues[n].(float64))
*d.(*int64) = f
case *float64:
f := float64(thisRowValues[n].(float64))
*d.(*float64) = f
case *string:
s := string(thisRowValues[n].(string))
*d.(*string) = s
default:
return errors.New(fmt.Sprintf("unknown destination type to scan into in variable #%d",n))
}
}
return nil
}
/* *****************************************************************
method: QueryResult.Types()
* *****************************************************************/
/*
Types() returns an array of the column's types.
Note that sqlite will repeat the type you tell it, but in many cases, it's ignored. So you can initialize a column as CHAR(3) but it's really TEXT. See https://www.sqlite.org/datatype3.html
This info may additionally conflict with the reality that your data is being JSON encoded/decoded.
*/
func (qr *QueryResult) Types() []string {
return qr.types
}
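A hedged sketch of the read-side idiom described above; the URL, table, and columns are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Xe/gorqlite"
)

func main() {
	conn, err := gorqlite.Open("http://localhost:4001/db") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	qr, err := conn.QueryOne("SELECT id, name FROM some_table") // placeholder query
	if err != nil {
		log.Fatal(err)
	}
	for qr.Next() { // advance the row pointer, then Scan into typed destinations
		var id int64
		var name string
		if err := qr.Scan(&id, &name); err != nil {
			log.Fatal(err)
		}
		fmt.Println(id, name)
	}
}
```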

179
vendor/github.com/Xe/gorqlite/write.go generated vendored Normal file
@@ -0,0 +1,179 @@
package gorqlite
/*
this file has
Write()
WriteResult and its methods
*/
import "errors"
import "encoding/json"
import "fmt"
/* *****************************************************************
method: Connection.Write()
This is the JSON we get back:
{
"results": [
{
"last_insert_id": 1,
"rows_affected": 1,
"time": 0.00759015
},
{
"last_insert_id": 2,
"rows_affected": 1,
"time": 0.00669015
}
],
"time": 0.869015
}
or
{
"results": [
{
"error": "table foo already exists"
}
],
"time": 0.18472685400000002
}
We don't care about the overall time. We just want the results,
so we'll take those and put each into a WriteResult
Because the results themselves are smaller than the JSON
(which repeats strings like "last_insert_id" frequently),
we'll just parse everything at once.
* *****************************************************************/
/*
WriteOne() is a convenience method that wraps Write() into a single-statement
method.
*/
func (conn *Connection) WriteOne(sqlStatement string) (wr WriteResult, err error) {
if ( conn.hasBeenClosed) {
wr.Err = errClosed
return wr, errClosed
}
sqlStatements := make([]string,0)
sqlStatements = append(sqlStatements,sqlStatement)
wra , err := conn.Write(sqlStatements)
return wra[0], err
}
/*
Write() is used to perform DDL/DML in the database. ALTER, CREATE, DELETE, DROP, INSERT, UPDATE, etc. all go through Write().
Write() takes an array of SQL statements, and returns an equal-sized array of WriteResults, each corresponding to the SQL statement that produced it.
All statements are executed as a single transaction.
Write() returns an error if one is encountered during its operation. If it's something like a call to the rqlite API, then it'll return that error. If one statement out of several has an error, it will return a generic "there were %d statement errors" and you'll have to look at the individual statement's Err for more info.
*/
func (conn *Connection) Write(sqlStatements []string) (results []WriteResult, err error) {
results = make([]WriteResult,0)
if ( conn.hasBeenClosed) {
var errResult WriteResult
errResult.Err = errClosed
results = append(results,errResult)
return results, errClosed
}
trace("%s: Write() for %d statements",conn.ID,len(sqlStatements))
response, err := conn.rqliteApiPost(api_WRITE,sqlStatements)
if ( err != nil ) {
trace("%s: rqliteApiCall() ERROR: %s",conn.ID,err.Error())
var errResult WriteResult
errResult.Err = err
results = append(results,errResult)
return results, err
}
trace("%s: rqliteApiCall() OK",conn.ID)
var sections map[string]interface{}
err = json.Unmarshal(response,&sections)
if ( err != nil ) {
trace("%s: json.Unmarshal() ERROR: %s",conn.ID,err.Error())
var errResult WriteResult
errResult.Err = err
results = append(results,errResult)
return results, err
}
/*
at this point, we have a "results" section and
a "time" section. we can igore the latter.
*/
resultsArray := sections["results"].([]interface{})
trace("%s: I have %d result(s) to parse",conn.ID,len(resultsArray))
numStatementErrors := 0
for n, k := range resultsArray {
trace("%s: starting on result %d",conn.ID,n)
thisResult := k.(map[string]interface{})
var thisWR WriteResult
thisWR.conn = conn
// did we get an error?
_, ok := thisResult["error"]
if ok {
trace("%s: have an error on this result: %s",conn.ID,thisResult["error"].(string))
thisWR.Err = errors.New(thisResult["error"].(string))
results = append(results,thisWR)
numStatementErrors += 1
continue
}
_, ok = thisResult["last_insert_id"]
if ok {
thisWR.LastInsertID = int64(thisResult["last_insert_id"].(float64))
}
_, ok = thisResult["rows_affected"] // could be zero for a CREATE
if ok {
thisWR.RowsAffected = int64(thisResult["rows_affected"].(float64))
}
thisWR.Timing = thisResult["time"].(float64)
trace("%s: this result (LII,RA,T): %d %d %f",conn.ID,thisWR.LastInsertID,thisWR.RowsAffected,thisWR.Timing)
results = append(results,thisWR)
}
trace("%s: finished parsing, returning %d results",conn.ID,len(results))
if ( numStatementErrors > 0 ) {
return results, errors.New(fmt.Sprintf("there were %d statement errors",numStatementErrors))
} else {
return results, nil
}
}
/* *****************************************************************
type: WriteResult
* *****************************************************************/
/*
A WriteResult holds the result of a single statement sent to Write().
Write() returns an array of WriteResult vars, while WriteOne() returns a single WriteResult.
*/
type WriteResult struct {
Err error // don't trust the rest if this isn't nil
Timing float64
RowsAffected int64 // affected by the change
LastInsertID int64 // if relevant, otherwise zero value
conn *Connection
}
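And the matching write-side sketch; again the URL and statement are placeholders:

```go
package main

import (
	"fmt"
	"log"

	"github.com/Xe/gorqlite"
)

func main() {
	conn, err := gorqlite.Open("http://localhost:4001/db") // placeholder URL
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	wr, err := conn.WriteOne("INSERT INTO some_table (name) VALUES ('fiona')") // placeholder DML
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("inserted row id:", wr.LastInsertID, "rows affected:", wr.RowsAffected)
}
```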

84
vendor/github.com/Xe/uuid/dce.go generated vendored Normal file
@@ -0,0 +1,84 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"fmt"
"os"
)
// A Domain represents a Version 2 domain
type Domain byte
// Domain constants for DCE Security (Version 2) UUIDs.
const (
Person = Domain(0)
Group = Domain(1)
Org = Domain(2)
)
// NewDCESecurity returns a DCE Security (Version 2) UUID.
//
// The domain should be one of Person, Group or Org.
// On a POSIX system the id should be the user's UID for the Person
// domain and the user's GID for the Group. The meaning of id for
// the domain Org or on non-POSIX systems is site defined.
//
// For a given domain/id pair the same token may be returned for up to
// 7 minutes and 10 seconds.
func NewDCESecurity(domain Domain, id uint32) UUID {
uuid := NewUUID()
if uuid != nil {
uuid[6] = (uuid[6] & 0x0f) | 0x20 // Version 2
uuid[9] = byte(domain)
binary.BigEndian.PutUint32(uuid[0:], id)
}
return uuid
}
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
// NewDCEPerson(Person, uint32(os.Getuid()))
func NewDCEPerson() UUID {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
// NewDCEGroup(Group, uint32(os.Getgid()))
func NewDCEGroup() UUID {
return NewDCESecurity(Group, uint32(os.Getgid()))
}
// Domain returns the domain for a Version 2 UUID or false.
func (uuid UUID) Domain() (Domain, bool) {
if v, _ := uuid.Version(); v != 2 {
return 0, false
}
return Domain(uuid[9]), true
}
// Id returns the id for a Version 2 UUID or false.
func (uuid UUID) Id() (uint32, bool) {
if v, _ := uuid.Version(); v != 2 {
return 0, false
}
return binary.BigEndian.Uint32(uuid[0:4]), true
}
func (d Domain) String() string {
switch d {
case Person:
return "Person"
case Group:
return "Group"
case Org:
return "Org"
}
return fmt.Sprintf("Domain%d", int(d))
}
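A quick sketch of the DCE helpers above; on POSIX the id comes from the calling process:

package main

import (
	"fmt"

	"github.com/Xe/uuid"
)

func main() {
	// Version 2 UUID bound to the current user's UID via os.Getuid.
	id := uuid.NewDCEPerson()
	if d, ok := id.Domain(); ok {
		fmt.Println(id, d) // prints the UUID and "Person"
	}
	if n, ok := id.Id(); ok {
		fmt.Println("uid:", n)
	}
}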

8
vendor/github.com/Xe/uuid/doc.go generated vendored Normal file

@ -0,0 +1,8 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The uuid package generates and inspects UUIDs.
//
// UUIDs are based on RFC 4122 and DCE 1.1: Authentication and Security Services.
package uuid

53
vendor/github.com/Xe/uuid/hash.go generated vendored Normal file

@ -0,0 +1,53 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"crypto/md5"
"crypto/sha1"
"hash"
)
// Well known Name Space IDs and UUIDs
var (
NameSpace_DNS = Parse("6ba7b810-9dad-11d1-80b4-00c04fd430c8")
NameSpace_URL = Parse("6ba7b811-9dad-11d1-80b4-00c04fd430c8")
NameSpace_OID = Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")
NameSpace_X500 = Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")
NIL = Parse("00000000-0000-0000-0000-000000000000")
)
// NewHash returns a new UUID derived from the hash of space concatenated with
// data generated by h. The hash should be at least 16 bytes in length. The
// first 16 bytes of the hash are used to form the UUID. The version of the
// UUID will be the lower 4 bits of version. NewHash is used to implement
// NewMD5 and NewSHA1.
func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
h.Reset()
h.Write(space)
h.Write(data)
s := h.Sum(nil)
uuid := make([]byte, 16)
copy(uuid, s)
uuid[6] = (uuid[6] & 0x0f) | uint8((version&0xf)<<4)
uuid[8] = (uuid[8] & 0x3f) | 0x80 // RFC 4122 variant
return uuid
}
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data.
//
// NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data.
//
// NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}
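Name-based UUIDs from NewMD5/NewSHA1 are deterministic, which is what makes them useful as stable identifiers; a small sketch:

package main

import (
	"fmt"

	"github.com/Xe/uuid"
)

func main() {
	// Same namespace + same name always yields the same Version 5 UUID.
	a := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("git.xeserv.us"))
	b := uuid.NewSHA1(uuid.NameSpace_DNS, []byte("git.xeserv.us"))
	fmt.Println(a)
	fmt.Println(uuid.Equal(a, b)) // true
}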

101
vendor/github.com/Xe/uuid/node.go generated vendored Normal file

@ -0,0 +1,101 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "net"
var (
interfaces []net.Interface // cached list of interfaces
ifname string // name of interface being used
nodeID []byte // hardware address for version 1 UUIDs
)
// NodeInterface returns the name of the interface from which the NodeID was
// derived. The interface "user" is returned if the NodeID was set by
// SetNodeID.
func NodeInterface() string {
return ifname
}
// SetNodeInterface selects the hardware address to be used for Version 1 UUIDs.
// If name is "" then the first usable interface found will be used or a random
// Node ID will be generated. If a named interface cannot be found then false
// is returned.
//
// SetNodeInterface never fails when name is "".
func SetNodeInterface(name string) bool {
if interfaces == nil {
var err error
interfaces, err = net.Interfaces()
if err != nil && name != "" {
return false
}
}
for _, ifs := range interfaces {
if len(ifs.HardwareAddr) >= 6 && (name == "" || name == ifs.Name) {
if setNodeID(ifs.HardwareAddr) {
ifname = ifs.Name
return true
}
}
}
// We found no interfaces with a valid hardware address. If name
// does not specify a specific interface generate a random Node ID
// (section 4.1.6)
if name == "" {
if nodeID == nil {
nodeID = make([]byte, 6)
}
randomBits(nodeID)
return true
}
return false
}
// NodeID returns a copy of the current Node ID, setting the Node ID
// if not already set.
func NodeID() []byte {
if nodeID == nil {
SetNodeInterface("")
}
nid := make([]byte, 6)
copy(nid, nodeID)
return nid
}
// SetNodeID sets the Node ID to be used for Version 1 UUIDs. The first 6 bytes
// of id are used. If id is less than 6 bytes then false is returned and the
// Node ID is not set.
func SetNodeID(id []byte) bool {
if setNodeID(id) {
ifname = "user"
return true
}
return false
}
func setNodeID(id []byte) bool {
if len(id) < 6 {
return false
}
if nodeID == nil {
nodeID = make([]byte, 6)
}
copy(nodeID, id)
return true
}
// NodeID returns the 6 byte node id encoded in uuid. It returns nil if uuid is
// not valid. The NodeID is only well defined for version 1 and 2 UUIDs.
func (uuid UUID) NodeID() []byte {
if len(uuid) != 16 {
return nil
}
node := make([]byte, 6)
copy(node, uuid[10:])
return node
}
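A sketch of pinning the Version 1 node ID instead of letting the package pick a NIC address; the six example bytes are arbitrary:

package main

import (
	"fmt"

	"github.com/Xe/uuid"
)

func main() {
	uuid.SetNodeID([]byte{0xde, 0xad, 0xbe, 0xef, 0x00, 0x01})
	fmt.Println(uuid.NodeInterface()) // "user"
	fmt.Printf("% x\n", uuid.NodeID())
}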

132
vendor/github.com/Xe/uuid/time.go generated vendored Normal file

@ -0,0 +1,132 @@
// Copyright 2014 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
"sync"
"time"
)
// A Time represents a time as the number of 100's of nanoseconds since 15 Oct
// 1582.
type Time int64
const (
lillian = 2299160 // Julian day of 15 Oct 1582
unix = 2440587 // Julian day of 1 Jan 1970
epoch = unix - lillian // Days between epochs
g1582 = epoch * 86400 // seconds between epochs
g1582ns100 = g1582 * 10000000 // 100s of nanoseconds between epochs
)
var (
mu sync.Mutex
lasttime uint64 // last time we returned
clock_seq uint16 // clock sequence for this run
timeNow = time.Now // for testing
)
// UnixTime converts t to the number of seconds and nanoseconds since the Unix
// epoch of 1 Jan 1970.
func (t Time) UnixTime() (sec, nsec int64) {
sec = int64(t - g1582ns100)
nsec = (sec % 10000000) * 100
sec /= 10000000
return sec, nsec
}
// GetTime returns the current Time (100s of nanoseconds since 15 Oct 1582) and
// adjusts the clock sequence as needed. An error is returned if the current
// time cannot be determined.
func GetTime() (Time, error) {
defer mu.Unlock()
mu.Lock()
return getTime()
}
func getTime() (Time, error) {
t := timeNow()
// If we don't have a clock sequence already, set one.
if clock_seq == 0 {
setClockSequence(-1)
}
now := uint64(t.UnixNano()/100) + g1582ns100
// If time has gone backwards with this clock sequence then we
// increment the clock sequence
if now <= lasttime {
clock_seq = ((clock_seq + 1) & 0x3fff) | 0x8000
}
lasttime = now
return Time(now), nil
}
// ClockSequence returns the current clock sequence, generating one if not
// already set. The clock sequence is only used for Version 1 UUIDs.
//
// The uuid package does not use global static storage for the clock sequence or
// the last time a UUID was generated. Unless SetClockSequence is used, a new
// random clock sequence is generated the first time a clock sequence is
// requested by ClockSequence, GetTime, or NewUUID. (section 4.2.1.1)
func ClockSequence() int {
defer mu.Unlock()
mu.Lock()
return clockSequence()
}
func clockSequence() int {
if clock_seq == 0 {
setClockSequence(-1)
}
return int(clock_seq & 0x3fff)
}
// SetClockSequence sets the clock sequence to the lower 14 bits of seq. Setting to
// -1 causes a new sequence to be generated.
func SetClockSequence(seq int) {
defer mu.Unlock()
mu.Lock()
setClockSequence(seq)
}
func setClockSequence(seq int) {
if seq == -1 {
var b [2]byte
randomBits(b[:]) // clock sequence
seq = int(b[0])<<8 | int(b[1])
}
old_seq := clock_seq
clock_seq = uint16(seq&0x3fff) | 0x8000 // Set our variant
if old_seq != clock_seq {
lasttime = 0
}
}
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. It returns false if uuid is not valid. The time is only well defined
// for version 1 and 2 UUIDs.
func (uuid UUID) Time() (Time, bool) {
if len(uuid) != 16 {
return 0, false
}
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
return Time(time), true
}
// ClockSequence returns the clock sequence encoded in uuid. It returns false
// if uuid is not valid. The clock sequence is only well defined for version 1
// and 2 UUIDs.
func (uuid UUID) ClockSequence() (int, bool) {
if len(uuid) != 16 {
return 0, false
}
return int(binary.BigEndian.Uint16(uuid[8:10])) & 0x3fff, true
}
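Since Version 1 UUIDs embed their creation time, Time() plus UnixTime() recovers a time.Time; a sketch:

package main

import (
	"fmt"
	"time"

	"github.com/Xe/uuid"
)

func main() {
	id := uuid.NewUUID() // Version 1, defined later in this package
	if t, ok := id.Time(); ok {
		sec, nsec := t.UnixTime()
		fmt.Println(time.Unix(sec, nsec).UTC())
	}
}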

43
vendor/github.com/Xe/uuid/util.go generated vendored Normal file

@ -0,0 +1,43 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// randomBits completely fills slice b with random data.
func randomBits(b []byte) {
if _, err := io.ReadFull(rander, b); err != nil {
panic(err.Error()) // rand should never fail
}
}
// xvalues returns the value of a byte as a hexadecimal digit or 255.
var xvalues = []byte{
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 10, 11, 12, 13, 14, 15, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
}
// xtob converts the first two hex bytes of x into a byte.
func xtob(x string) (byte, bool) {
b1 := xvalues[x[0]]
b2 := xvalues[x[1]]
return (b1 << 4) | b2, b1 != 255 && b2 != 255
}

163
vendor/github.com/Xe/uuid/uuid.go generated vendored Normal file

@ -0,0 +1,163 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"bytes"
"crypto/rand"
"fmt"
"io"
"strings"
)
// A UUID is a 128 bit (16 byte) Universal Unique IDentifier as defined in RFC
// 4122.
type UUID []byte
// A Version represents a UUIDs version.
type Version byte
// A Variant represents a UUIDs variant.
type Variant byte
// Constants returned by Variant.
const (
Invalid = Variant(iota) // Invalid UUID
RFC4122 // The variant specified in RFC4122
Reserved // Reserved, NCS backward compatibility.
Microsoft // Reserved, Microsoft Corporation backward compatibility.
Future // Reserved for future definition.
)
var rander = rand.Reader // random function
// New returns a new random (version 4) UUID as a string. It is a convenience
// function for NewRandom().String().
func New() string {
return NewRandom().String()
}
// Parse decodes s into a UUID or returns nil. Both the UUID form of
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded.
func Parse(s string) UUID {
if len(s) == 36+9 {
if strings.ToLower(s[:9]) != "urn:uuid:" {
return nil
}
s = s[9:]
} else if len(s) != 36 {
return nil
}
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return nil
}
uuid := make([]byte, 16)
for i, x := range []int{
0, 2, 4, 6,
9, 11,
14, 16,
19, 21,
24, 26, 28, 30, 32, 34} {
v, ok := xtob(s[x:])
if !ok {
return nil
}
uuid[i] = v
}
return uuid
}
// Equal returns true if uuid1 and uuid2 are equal.
func Equal(uuid1, uuid2 UUID) bool {
return bytes.Equal(uuid1, uuid2)
}
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid.
func (uuid UUID) String() string {
if uuid == nil || len(uuid) != 16 {
return ""
}
b := []byte(uuid)
return fmt.Sprintf("%08x-%04x-%04x-%04x-%012x",
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
}
// URN returns the RFC 2141 URN form of uuid,
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx, or "" if uuid is invalid.
func (uuid UUID) URN() string {
if uuid == nil || len(uuid) != 16 {
return ""
}
b := []byte(uuid)
return fmt.Sprintf("urn:uuid:%08x-%04x-%04x-%04x-%012x",
b[:4], b[4:6], b[6:8], b[8:10], b[10:])
}
// Variant returns the variant encoded in uuid. It returns Invalid if
// uuid is invalid.
func (uuid UUID) Variant() Variant {
if len(uuid) != 16 {
return Invalid
}
switch {
case (uuid[8] & 0xc0) == 0x80:
return RFC4122
case (uuid[8] & 0xe0) == 0xc0:
return Microsoft
case (uuid[8] & 0xe0) == 0xe0:
return Future
default:
return Reserved
}
}
// Version returns the version of uuid. It returns false if uuid is not
// valid.
func (uuid UUID) Version() (Version, bool) {
if len(uuid) != 16 {
return 0, false
}
return Version(uuid[6] >> 4), true
}
func (v Version) String() string {
if v > 15 {
return fmt.Sprintf("BAD_VERSION_%d", v)
}
return fmt.Sprintf("VERSION_%d", v)
}
func (v Variant) String() string {
switch v {
case RFC4122:
return "RFC4122"
case Reserved:
return "Reserved"
case Microsoft:
return "Microsoft"
case Future:
return "Future"
case Invalid:
return "Invalid"
}
return fmt.Sprintf("BadVariant%d", int(v))
}
// SetRand sets the random number generator to r, which implements io.Reader.
// If r.Read returns an error when the package requests random data then
// a panic will be issued.
//
// Calling SetRand with nil sets the random number generator to the default
// generator.
func SetRand(r io.Reader) {
if r == nil {
rander = rand.Reader
return
}
rander = r
}
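A round trip through the core API above; Parse accepts both the plain and the urn:uuid: form:

package main

import (
	"fmt"

	"github.com/Xe/uuid"
)

func main() {
	id := uuid.Parse("urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8")
	if id == nil {
		fmt.Println("not a UUID")
		return
	}
	v, _ := id.Version()
	fmt.Println(id.String(), id.URN(), id.Variant(), v)
}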

41
vendor/github.com/Xe/uuid/version1.go generated vendored Normal file

@ -0,0 +1,41 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"encoding/binary"
)
// NewUUID returns a Version 1 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set NewUUID returns nil. If clock sequence has not been set by
// SetClockSequence then it will be set automatically. If GetTime fails to
// return the current NewUUID returns nil.
func NewUUID() UUID {
if nodeID == nil {
SetNodeInterface("")
}
now, err := GetTime()
if err != nil {
return nil
}
uuid := make([]byte, 16)
time_low := uint32(now & 0xffffffff)
time_mid := uint16((now >> 32) & 0xffff)
time_hi := uint16((now >> 48) & 0x0fff)
time_hi |= 0x1000 // Version 1
binary.BigEndian.PutUint32(uuid[0:], time_low)
binary.BigEndian.PutUint16(uuid[4:], time_mid)
binary.BigEndian.PutUint16(uuid[6:], time_hi)
binary.BigEndian.PutUint16(uuid[8:], clock_seq)
copy(uuid[10:], nodeID)
return uuid
}
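Back-to-back Version 1 UUIDs share a node ID and differ in timestamp (or clock sequence); a sketch:

package main

import (
	"fmt"

	"github.com/Xe/uuid"
)

func main() {
	a, b := uuid.NewUUID(), uuid.NewUUID()
	fmt.Println(a)
	fmt.Println(b)
	fmt.Printf("node: % x\n", a.NodeID()) // same for both
}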

25
vendor/github.com/Xe/uuid/version4.go generated vendored Normal file

@ -0,0 +1,25 @@
// Copyright 2011 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
// NewRandom returns a Random (Version 4) UUID or panics.
//
// The strength of the UUIDs is based on the strength of the crypto/rand
// package.
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
// Randomly generated UUIDs have 122 random bits. One's annual risk of being
// hit by a meteorite is estimated to be one chance in 17 billion, which
// means the probability is about 0.00000000006 (6 × 10⁻¹¹),
// equivalent to the odds of creating a few tens of trillions of UUIDs in a
// year and having one duplicate.
func NewRandom() UUID {
uuid := make([]byte, 16)
randomBits(uuid)
uuid[6] = (uuid[6] & 0x0f) | 0x40 // Version 4
uuid[8] = (uuid[8] & 0x3f) | 0x80 // Variant is 10
return uuid
}
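Random UUIDs are the variant most code reaches for; NewRandom and the New convenience wrapper in use:

package main

import (
	"fmt"

	"github.com/Xe/uuid"
)

func main() {
	fmt.Println(uuid.NewRandom()) // UUID value
	fmt.Println(uuid.New())       // convenience: NewRandom().String()
}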

285
vendor/github.com/caarlos0/env/env.go generated vendored Normal file

@ -0,0 +1,285 @@
package env
import (
"errors"
"os"
"reflect"
"strconv"
"strings"
"time"
)
var (
// ErrNotAStructPtr is returned if you pass something that is not a pointer to a
// Struct to Parse
ErrNotAStructPtr = errors.New("Expected a pointer to a Struct")
// ErrUnsupportedType if the struct field type is not supported by env
ErrUnsupportedType = errors.New("Type is not supported")
// ErrUnsupportedSliceType if the slice element type is not supported by env
ErrUnsupportedSliceType = errors.New("Unsupported slice type")
// Friendly names for reflect types
sliceOfInts = reflect.TypeOf([]int(nil))
sliceOfInt64s = reflect.TypeOf([]int64(nil))
sliceOfStrings = reflect.TypeOf([]string(nil))
sliceOfBools = reflect.TypeOf([]bool(nil))
sliceOfFloat32s = reflect.TypeOf([]float32(nil))
sliceOfFloat64s = reflect.TypeOf([]float64(nil))
)
// Parse parses a struct containing `env` tags and loads its values from
// environment variables.
func Parse(v interface{}) error {
ptrRef := reflect.ValueOf(v)
if ptrRef.Kind() != reflect.Ptr {
return ErrNotAStructPtr
}
ref := ptrRef.Elem()
if ref.Kind() != reflect.Struct {
return ErrNotAStructPtr
}
return doParse(ref)
}
func doParse(ref reflect.Value) error {
refType := ref.Type()
var errorList []string
for i := 0; i < refType.NumField(); i++ {
value, err := get(refType.Field(i))
if err != nil {
errorList = append(errorList, err.Error())
continue
}
if value == "" {
continue
}
if err := set(ref.Field(i), refType.Field(i), value); err != nil {
errorList = append(errorList, err.Error())
continue
}
}
if len(errorList) == 0 {
return nil
}
return errors.New(strings.Join(errorList, ". "))
}
func get(field reflect.StructField) (string, error) {
var (
val string
err error
)
key, opts := parseKeyForOption(field.Tag.Get("env"))
defaultValue := field.Tag.Get("envDefault")
val = getOr(key, defaultValue)
if len(opts) > 0 {
for _, opt := range opts {
// The only option supported is "required".
switch opt {
case "":
break
case "required":
val, err = getRequired(key)
default:
err = errors.New("Env tag option " + opt + " not supported.")
}
}
}
return val, err
}
// split the env tag's key into the expected key and desired option, if any.
func parseKeyForOption(key string) (string, []string) {
opts := strings.Split(key, ",")
return opts[0], opts[1:]
}
func getRequired(key string) (string, error) {
if value, ok := os.LookupEnv(key); ok {
return value, nil
}
// We do not use fmt.Errorf to avoid another import.
return "", errors.New("Required environment variable " + key + " is not set")
}
func getOr(key, defaultValue string) string {
value, ok := os.LookupEnv(key)
if ok {
return value
}
return defaultValue
}
func set(field reflect.Value, refType reflect.StructField, value string) error {
switch field.Kind() {
case reflect.Slice:
separator := refType.Tag.Get("envSeparator")
return handleSlice(field, value, separator)
case reflect.String:
field.SetString(value)
case reflect.Bool:
bvalue, err := strconv.ParseBool(value)
if err != nil {
return err
}
field.SetBool(bvalue)
case reflect.Int:
intValue, err := strconv.ParseInt(value, 10, 32)
if err != nil {
return err
}
field.SetInt(intValue)
case reflect.Uint:
uintValue, err := strconv.ParseUint(value, 10, 32)
if err != nil {
return err
}
field.SetUint(uintValue)
case reflect.Float32:
v, err := strconv.ParseFloat(value, 32)
if err != nil {
return err
}
field.SetFloat(v)
case reflect.Float64:
v, err := strconv.ParseFloat(value, 64)
if err != nil {
return err
}
field.Set(reflect.ValueOf(v))
case reflect.Int64:
if refType.Type.String() == "time.Duration" {
dValue, err := time.ParseDuration(value)
if err != nil {
return err
}
field.Set(reflect.ValueOf(dValue))
} else {
intValue, err := strconv.ParseInt(value, 10, 64)
if err != nil {
return err
}
field.SetInt(intValue)
}
default:
return ErrUnsupportedType
}
return nil
}
func handleSlice(field reflect.Value, value, separator string) error {
if separator == "" {
separator = ","
}
splitData := strings.Split(value, separator)
switch field.Type() {
case sliceOfStrings:
field.Set(reflect.ValueOf(splitData))
case sliceOfInts:
intData, err := parseInts(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(intData))
case sliceOfInt64s:
int64Data, err := parseInt64s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(int64Data))
case sliceOfFloat32s:
data, err := parseFloat32s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(data))
case sliceOfFloat64s:
data, err := parseFloat64s(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(data))
case sliceOfBools:
boolData, err := parseBools(splitData)
if err != nil {
return err
}
field.Set(reflect.ValueOf(boolData))
default:
return ErrUnsupportedSliceType
}
return nil
}
func parseInts(data []string) ([]int, error) {
var intSlice []int
for _, v := range data {
intValue, err := strconv.ParseInt(v, 10, 32)
if err != nil {
return nil, err
}
intSlice = append(intSlice, int(intValue))
}
return intSlice, nil
}
func parseInt64s(data []string) ([]int64, error) {
var intSlice []int64
for _, v := range data {
intValue, err := strconv.ParseInt(v, 10, 64)
if err != nil {
return nil, err
}
intSlice = append(intSlice, int64(intValue))
}
return intSlice, nil
}
func parseFloat32s(data []string) ([]float32, error) {
var float32Slice []float32
for _, v := range data {
data, err := strconv.ParseFloat(v, 32)
if err != nil {
return nil, err
}
float32Slice = append(float32Slice, float32(data))
}
return float32Slice, nil
}
func parseFloat64s(data []string) ([]float64, error) {
var float64Slice []float64
for _, v := range data {
data, err := strconv.ParseFloat(v, 64)
if err != nil {
return nil, err
}
float64Slice = append(float64Slice, float64(data))
}
return float64Slice, nil
}
func parseBools(data []string) ([]bool, error) {
var boolSlice []bool
for _, v := range data {
bvalue, err := strconv.ParseBool(v)
if err != nil {
return nil, err
}
boolSlice = append(boolSlice, bvalue)
}
return boolSlice, nil
}
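A sketch exercising the tag handling above — envDefault, envSeparator, time.Duration, and the required option; the field names and variables are illustrative only:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/caarlos0/env"
)

type config struct {
	Port    int           `env:"PORT" envDefault:"8080"`
	Debug   bool          `env:"DEBUG"`
	Hosts   []string      `env:"HOSTS" envSeparator:";"`
	Timeout time.Duration `env:"TIMEOUT" envDefault:"5s"`
	Token   string        `env:"TOKEN,required"` // Parse errors if TOKEN is unset
}

func main() {
	var cfg config
	if err := env.Parse(&cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg)
}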

166
vendor/github.com/nats-io/go-nats/context.go generated vendored Normal file

@ -0,0 +1,166 @@
// Copyright 2012-2017 Apcera Inc. All rights reserved.
// +build go1.7
// A Go client for the NATS messaging system (https://nats.io).
package nats
import (
"context"
"fmt"
"reflect"
)
// RequestWithContext takes a context, a subject and payload
// in bytes and request expecting a single response.
func (nc *Conn) RequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) {
if ctx == nil {
return nil, ErrInvalidContext
}
if nc == nil {
return nil, ErrInvalidConnection
}
nc.mu.Lock()
// If user wants the old style.
if nc.Opts.UseOldRequestStyle {
nc.mu.Unlock()
return nc.oldRequestWithContext(ctx, subj, data)
}
// Do setup for the new style.
if nc.respMap == nil {
// _INBOX wildcard
nc.respSub = fmt.Sprintf("%s.*", NewInbox())
nc.respMap = make(map[string]chan *Msg)
}
// Create literal Inbox and map to a chan msg.
mch := make(chan *Msg, RequestChanLen)
respInbox := nc.newRespInbox()
token := respToken(respInbox)
nc.respMap[token] = mch
createSub := nc.respMux == nil
ginbox := nc.respSub
nc.mu.Unlock()
if createSub {
// Make sure scoped subscription is setup only once.
var err error
nc.respSetup.Do(func() { err = nc.createRespMux(ginbox) })
if err != nil {
return nil, err
}
}
err := nc.PublishRequest(subj, respInbox, data)
if err != nil {
return nil, err
}
var ok bool
var msg *Msg
select {
case msg, ok = <-mch:
if !ok {
return nil, ErrConnectionClosed
}
case <-ctx.Done():
nc.mu.Lock()
delete(nc.respMap, token)
nc.mu.Unlock()
return nil, ctx.Err()
}
return msg, nil
}
// oldRequestWithContext utilizes inbox and subscription per request.
func (nc *Conn) oldRequestWithContext(ctx context.Context, subj string, data []byte) (*Msg, error) {
inbox := NewInbox()
ch := make(chan *Msg, RequestChanLen)
s, err := nc.subscribe(inbox, _EMPTY_, nil, ch)
if err != nil {
return nil, err
}
s.AutoUnsubscribe(1)
defer s.Unsubscribe()
err = nc.PublishRequest(subj, inbox, data)
if err != nil {
return nil, err
}
return s.NextMsgWithContext(ctx)
}
// NextMsgWithContext takes a context and returns the next message
// available to a synchronous subscriber, blocking until it is delivered
// or context gets canceled.
func (s *Subscription) NextMsgWithContext(ctx context.Context) (*Msg, error) {
if ctx == nil {
return nil, ErrInvalidContext
}
if s == nil {
return nil, ErrBadSubscription
}
s.mu.Lock()
err := s.validateNextMsgState()
if err != nil {
s.mu.Unlock()
return nil, err
}
// snapshot
mch := s.mch
s.mu.Unlock()
var ok bool
var msg *Msg
select {
case msg, ok = <-mch:
if !ok {
return nil, ErrConnectionClosed
}
err := s.processNextMsgDelivered(msg)
if err != nil {
return nil, err
}
case <-ctx.Done():
return nil, ctx.Err()
}
return msg, nil
}
// RequestWithContext will create an Inbox and perform a Request
// using the provided cancellation context with the Inbox reply
// for the data v. A response will be decoded into the vPtrResponse.
func (c *EncodedConn) RequestWithContext(ctx context.Context, subject string, v interface{}, vPtr interface{}) error {
if ctx == nil {
return ErrInvalidContext
}
b, err := c.Enc.Encode(subject, v)
if err != nil {
return err
}
m, err := c.Conn.RequestWithContext(ctx, subject, b)
if err != nil {
return err
}
if reflect.TypeOf(vPtr) == emptyMsgType {
mPtr := vPtr.(*Msg)
*mPtr = *m
} else {
err := c.Enc.Decode(m.Subject, m.Data, vPtr)
if err != nil {
return err
}
}
return nil
}
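A caller-side sketch of RequestWithContext; it assumes a reachable NATS server (nats.Connect and DefaultURL live in nats.go, whose diff is suppressed below) and some responder on the subject:

package main

import (
	"context"
	"fmt"
	"log"
	"time"

	nats "github.com/nats-io/go-nats"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	// Returns ctx.Err() if no responder answers within the deadline.
	msg, err := nc.RequestWithContext(ctx, "greet", []byte("hello"))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(string(msg.Data))
}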

249
vendor/github.com/nats-io/go-nats/enc.go generated vendored Normal file

@ -0,0 +1,249 @@
// Copyright 2012-2015 Apcera Inc. All rights reserved.
package nats
import (
"errors"
"fmt"
"reflect"
"sync"
"time"
// Default Encoders
. "github.com/nats-io/go-nats/encoders/builtin"
)
// Encoder interface is for all register encoders
type Encoder interface {
Encode(subject string, v interface{}) ([]byte, error)
Decode(subject string, data []byte, vPtr interface{}) error
}
var encMap map[string]Encoder
var encLock sync.Mutex
// Index names into the Registered Encoders.
const (
JSON_ENCODER = "json"
GOB_ENCODER = "gob"
DEFAULT_ENCODER = "default"
)
func init() {
encMap = make(map[string]Encoder)
// Register json, gob and default encoder
RegisterEncoder(JSON_ENCODER, &JsonEncoder{})
RegisterEncoder(GOB_ENCODER, &GobEncoder{})
RegisterEncoder(DEFAULT_ENCODER, &DefaultEncoder{})
}
// EncodedConn is the preferred way to interface with NATS. It wraps a bare connection to
// a nats server and has an extendable encoder system that will encode and decode messages
// from raw Go types.
type EncodedConn struct {
Conn *Conn
Enc Encoder
}
// NewEncodedConn will wrap an existing Connection and utilize the appropriate registered
// encoder.
func NewEncodedConn(c *Conn, encType string) (*EncodedConn, error) {
if c == nil {
return nil, errors.New("nats: Nil Connection")
}
if c.IsClosed() {
return nil, ErrConnectionClosed
}
ec := &EncodedConn{Conn: c, Enc: EncoderForType(encType)}
if ec.Enc == nil {
return nil, fmt.Errorf("No encoder registered for '%s'", encType)
}
return ec, nil
}
// RegisterEncoder will register the encType with the given Encoder. Useful for customization.
func RegisterEncoder(encType string, enc Encoder) {
encLock.Lock()
defer encLock.Unlock()
encMap[encType] = enc
}
// EncoderForType will return the registered Encoder for the encType.
func EncoderForType(encType string) Encoder {
encLock.Lock()
defer encLock.Unlock()
return encMap[encType]
}
// Publish publishes the data argument to the given subject. The data argument
// will be encoded using the associated encoder.
func (c *EncodedConn) Publish(subject string, v interface{}) error {
b, err := c.Enc.Encode(subject, v)
if err != nil {
return err
}
return c.Conn.publish(subject, _EMPTY_, b)
}
// PublishRequest will perform a Publish() expecting a response on the
// reply subject. Use Request() for automatically waiting for a response
// inline.
func (c *EncodedConn) PublishRequest(subject, reply string, v interface{}) error {
b, err := c.Enc.Encode(subject, v)
if err != nil {
return err
}
return c.Conn.publish(subject, reply, b)
}
// Request will create an Inbox and perform a Request() call
// with the Inbox reply for the data v. A response will be
// decoded into the vPtrResponse.
func (c *EncodedConn) Request(subject string, v interface{}, vPtr interface{}, timeout time.Duration) error {
b, err := c.Enc.Encode(subject, v)
if err != nil {
return err
}
m, err := c.Conn.Request(subject, b, timeout)
if err != nil {
return err
}
if reflect.TypeOf(vPtr) == emptyMsgType {
mPtr := vPtr.(*Msg)
*mPtr = *m
} else {
err = c.Enc.Decode(m.Subject, m.Data, vPtr)
}
return err
}
// Handler is a specific callback used for Subscribe. It is generalized to
// an interface{}, but we will discover its format and arguments at runtime
// and perform the correct callback, including de-marshaling JSON strings
// back into the appropriate struct based on the signature of the Handler.
//
// Handlers are expected to have one of four signatures.
//
// type person struct {
// Name string `json:"name,omitempty"`
// Age uint `json:"age,omitempty"`
// }
//
// handler := func(m *Msg)
// handler := func(p *person)
// handler := func(subject string, o *obj)
// handler := func(subject, reply string, o *obj)
//
// These forms allow a callback to request a raw Msg ptr, where the processing
// of the message from the wire is untouched, or to process a JSON representation
// and unmarshal it into the given struct, e.g. person.
// There are also variants where the callback wants either the subject, or the
// subject and the reply subject.
type Handler interface{}
// Dissect the cb Handler's signature
func argInfo(cb Handler) (reflect.Type, int) {
cbType := reflect.TypeOf(cb)
if cbType.Kind() != reflect.Func {
panic("nats: Handler needs to be a func")
}
numArgs := cbType.NumIn()
if numArgs == 0 {
return nil, numArgs
}
return cbType.In(numArgs - 1), numArgs
}
var emptyMsgType = reflect.TypeOf(&Msg{})
// Subscribe will create a subscription on the given subject and process incoming
// messages using the specified Handler. The Handler should be a func that matches
// a signature from the description of Handler from above.
func (c *EncodedConn) Subscribe(subject string, cb Handler) (*Subscription, error) {
return c.subscribe(subject, _EMPTY_, cb)
}
// QueueSubscribe will create a queue subscription on the given subject and process
// incoming messages using the specified Handler. The Handler should be a func that
// matches a signature from the description of Handler from above.
func (c *EncodedConn) QueueSubscribe(subject, queue string, cb Handler) (*Subscription, error) {
return c.subscribe(subject, queue, cb)
}
// Internal implementation that all public functions will use.
func (c *EncodedConn) subscribe(subject, queue string, cb Handler) (*Subscription, error) {
if cb == nil {
return nil, errors.New("nats: Handler required for EncodedConn Subscription")
}
argType, numArgs := argInfo(cb)
if argType == nil {
return nil, errors.New("nats: Handler requires at least one argument")
}
cbValue := reflect.ValueOf(cb)
wantsRaw := (argType == emptyMsgType)
natsCB := func(m *Msg) {
var oV []reflect.Value
if wantsRaw {
oV = []reflect.Value{reflect.ValueOf(m)}
} else {
var oPtr reflect.Value
if argType.Kind() != reflect.Ptr {
oPtr = reflect.New(argType)
} else {
oPtr = reflect.New(argType.Elem())
}
if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
if c.Conn.Opts.AsyncErrorCB != nil {
c.Conn.ach <- func() {
c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, errors.New("nats: Got an error trying to unmarshal: "+err.Error()))
}
}
return
}
if argType.Kind() != reflect.Ptr {
oPtr = reflect.Indirect(oPtr)
}
// Callback Arity
switch numArgs {
case 1:
oV = []reflect.Value{oPtr}
case 2:
subV := reflect.ValueOf(m.Subject)
oV = []reflect.Value{subV, oPtr}
case 3:
subV := reflect.ValueOf(m.Subject)
replyV := reflect.ValueOf(m.Reply)
oV = []reflect.Value{subV, replyV, oPtr}
}
}
cbValue.Call(oV)
}
return c.Conn.subscribe(subject, queue, natsCB, nil)
}
// FlushTimeout allows a Flush operation to have an associated timeout.
func (c *EncodedConn) FlushTimeout(timeout time.Duration) (err error) {
return c.Conn.FlushTimeout(timeout)
}
// Flush will perform a round trip to the server and return when it
// receives the internal reply.
func (c *EncodedConn) Flush() error {
return c.Conn.Flush()
}
// Close will close the connection to the server. This call will release
// all blocking calls, such as Flush(), etc.
func (c *EncodedConn) Close() {
c.Conn.Close()
}
// LastError reports the last error encountered via the Connection.
func (c *EncodedConn) LastError() error {
return c.Conn.err
}
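A sketch of the typed-handler form described above, using the JSON encoder registered in init(); assumes a local NATS server:

package main

import (
	"fmt"
	"log"

	nats "github.com/nats-io/go-nats"
)

type person struct {
	Name string `json:"name,omitempty"`
	Age  uint   `json:"age,omitempty"`
}

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	// One of the four supported handler shapes: a typed pointer argument.
	c.Subscribe("people", func(p *person) {
		fmt.Println("got:", p.Name, p.Age)
	})
	c.Publish("people", &person{Name: "Cadey", Age: 30})
	c.Flush()
}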


@ -0,0 +1,106 @@
// Copyright 2012-2015 Apcera Inc. All rights reserved.
package builtin
import (
"bytes"
"fmt"
"reflect"
"strconv"
"unsafe"
)
// DefaultEncoder implementation for EncodedConn.
// This encoder will leave []byte and string untouched, but will attempt to
// turn numbers into appropriate strings that can be decoded. It will also
// properly encode and decode bools. It will encode a struct, but if you want
// to properly handle structures you should use JsonEncoder.
type DefaultEncoder struct {
// Empty
}
var trueB = []byte("true")
var falseB = []byte("false")
var nilB = []byte("")
// Encode
func (je *DefaultEncoder) Encode(subject string, v interface{}) ([]byte, error) {
switch arg := v.(type) {
case string:
bytes := *(*[]byte)(unsafe.Pointer(&arg))
return bytes, nil
case []byte:
return arg, nil
case bool:
if arg {
return trueB, nil
} else {
return falseB, nil
}
case nil:
return nilB, nil
default:
var buf bytes.Buffer
fmt.Fprintf(&buf, "%+v", arg)
return buf.Bytes(), nil
}
}
// Decode
func (je *DefaultEncoder) Decode(subject string, data []byte, vPtr interface{}) error {
// Figure out what it's pointing to...
sData := *(*string)(unsafe.Pointer(&data))
switch arg := vPtr.(type) {
case *string:
*arg = sData
return nil
case *[]byte:
*arg = data
return nil
case *int:
n, err := strconv.ParseInt(sData, 10, 64)
if err != nil {
return err
}
*arg = int(n)
return nil
case *int32:
n, err := strconv.ParseInt(sData, 10, 64)
if err != nil {
return err
}
*arg = int32(n)
return nil
case *int64:
n, err := strconv.ParseInt(sData, 10, 64)
if err != nil {
return err
}
*arg = int64(n)
return nil
case *float32:
n, err := strconv.ParseFloat(sData, 32)
if err != nil {
return err
}
*arg = float32(n)
return nil
case *float64:
n, err := strconv.ParseFloat(sData, 64)
if err != nil {
return err
}
*arg = float64(n)
return nil
case *bool:
b, err := strconv.ParseBool(sData)
if err != nil {
return err
}
*arg = b
return nil
default:
vt := reflect.TypeOf(arg).Elem()
return fmt.Errorf("nats: Default Encoder can't decode to type %s", vt)
}
}


@ -0,0 +1,34 @@
// Copyright 2013-2015 Apcera Inc. All rights reserved.
package builtin
import (
"bytes"
"encoding/gob"
)
// GobEncoder is a Go specific GOB Encoder implementation for EncodedConn.
// This encoder will use the builtin encoding/gob to Marshal
// and Unmarshal most types, including structs.
type GobEncoder struct {
// Empty
}
// FIXME(dlc) - This could probably be more efficient.
// Encode
func (ge *GobEncoder) Encode(subject string, v interface{}) ([]byte, error) {
b := new(bytes.Buffer)
enc := gob.NewEncoder(b)
if err := enc.Encode(v); err != nil {
return nil, err
}
return b.Bytes(), nil
}
// Decode
func (ge *GobEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) {
dec := gob.NewDecoder(bytes.NewBuffer(data))
err = dec.Decode(vPtr)
return
}


@ -0,0 +1,45 @@
// Copyright 2012-2015 Apcera Inc. All rights reserved.
package builtin
import (
"encoding/json"
"strings"
)
// JsonEncoder is a JSON Encoder implementation for EncodedConn.
// This encoder will use the builtin encoding/json to Marshal
// and Unmarshal most types, including structs.
type JsonEncoder struct {
// Empty
}
// Encode
func (je *JsonEncoder) Encode(subject string, v interface{}) ([]byte, error) {
b, err := json.Marshal(v)
if err != nil {
return nil, err
}
return b, nil
}
// Decode
func (je *JsonEncoder) Decode(subject string, data []byte, vPtr interface{}) (err error) {
switch arg := vPtr.(type) {
case *string:
// If they want a string and it is a JSON string, strip quotes
// This allows someone to send a struct but receive as a plain string
// This cast should be efficient for Go 1.3 and beyond.
str := string(data)
if strings.HasPrefix(str, `"`) && strings.HasSuffix(str, `"`) {
*arg = str[1 : len(str)-1]
} else {
*arg = str
}
case *[]byte:
*arg = data
default:
err = json.Unmarshal(data, arg)
}
return
}

2975
vendor/github.com/nats-io/go-nats/nats.go generated vendored Normal file

File diff suppressed because it is too large

100
vendor/github.com/nats-io/go-nats/netchan.go generated vendored Normal file

@ -0,0 +1,100 @@
// Copyright 2013-2017 Apcera Inc. All rights reserved.
package nats
import (
"errors"
"reflect"
)
// This allows the functionality for network channels by binding send and receive Go chans
// to subjects and optionally queue groups.
// Data will be encoded and decoded via the EncodedConn and its associated encoders.
// BindSendChan binds a channel for send operations to NATS.
func (c *EncodedConn) BindSendChan(subject string, channel interface{}) error {
chVal := reflect.ValueOf(channel)
if chVal.Kind() != reflect.Chan {
return ErrChanArg
}
go chPublish(c, chVal, subject)
return nil
}
// Publish all values that arrive on the channel until it is closed or we
// encounter an error.
func chPublish(c *EncodedConn, chVal reflect.Value, subject string) {
for {
val, ok := chVal.Recv()
if !ok {
// Channel has most likely been closed.
return
}
if e := c.Publish(subject, val.Interface()); e != nil {
// Do this under lock.
c.Conn.mu.Lock()
defer c.Conn.mu.Unlock()
if c.Conn.Opts.AsyncErrorCB != nil {
// FIXME(dlc) - Not sure this is the right thing to do.
// FIXME(ivan) - If the connection is not yet closed, try to schedule the callback
if c.Conn.isClosed() {
go c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e)
} else {
c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, nil, e) }
}
}
return
}
}
}
// BindRecvChan binds a channel for receive operations from NATS.
func (c *EncodedConn) BindRecvChan(subject string, channel interface{}) (*Subscription, error) {
return c.bindRecvChan(subject, _EMPTY_, channel)
}
// BindRecvQueueChan binds a channel for queue-based receive operations from NATS.
func (c *EncodedConn) BindRecvQueueChan(subject, queue string, channel interface{}) (*Subscription, error) {
return c.bindRecvChan(subject, queue, channel)
}
// Internal function to bind receive operations for a channel.
func (c *EncodedConn) bindRecvChan(subject, queue string, channel interface{}) (*Subscription, error) {
chVal := reflect.ValueOf(channel)
if chVal.Kind() != reflect.Chan {
return nil, ErrChanArg
}
argType := chVal.Type().Elem()
cb := func(m *Msg) {
var oPtr reflect.Value
if argType.Kind() != reflect.Ptr {
oPtr = reflect.New(argType)
} else {
oPtr = reflect.New(argType.Elem())
}
if err := c.Enc.Decode(m.Subject, m.Data, oPtr.Interface()); err != nil {
c.Conn.err = errors.New("nats: Got an error trying to unmarshal: " + err.Error())
if c.Conn.Opts.AsyncErrorCB != nil {
c.Conn.ach <- func() { c.Conn.Opts.AsyncErrorCB(c.Conn, m.Sub, c.Conn.err) }
}
return
}
if argType.Kind() != reflect.Ptr {
oPtr = reflect.Indirect(oPtr)
}
// This is a bit hacky, but in this instance we may be trying to send to a closed channel,
// and the user does not know when it is safe to close the channel.
defer func() {
// If we have panicked, recover and close the subscription.
if r := recover(); r != nil {
m.Sub.Unsubscribe()
}
}()
// Actually do the send to the channel.
chVal.Send(oPtr)
}
return c.Conn.subscribe(subject, queue, cb, nil)
}
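A sketch binding both directions of a subject to Go channels; assumes a local NATS server:

package main

import (
	"fmt"
	"log"

	nats "github.com/nats-io/go-nats"
)

func main() {
	nc, err := nats.Connect(nats.DefaultURL)
	if err != nil {
		log.Fatal(err)
	}
	c, err := nats.NewEncodedConn(nc, nats.JSON_ENCODER)
	if err != nil {
		log.Fatal(err)
	}
	defer c.Close()

	send := make(chan string)
	recv := make(chan string)
	c.BindSendChan("words", send)
	sub, err := c.BindRecvChan("words", recv)
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	send <- "ping"        // published for us by chPublish
	fmt.Println(<-recv)   // "ping", decoded back off the wire
}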

470
vendor/github.com/nats-io/go-nats/parser.go generated vendored Normal file

@ -0,0 +1,470 @@
// Copyright 2012-2017 Apcera Inc. All rights reserved.
package nats
import (
"fmt"
)
type msgArg struct {
subject []byte
reply []byte
sid int64
size int
}
const MAX_CONTROL_LINE_SIZE = 1024
type parseState struct {
state int
as int
drop int
ma msgArg
argBuf []byte
msgBuf []byte
scratch [MAX_CONTROL_LINE_SIZE]byte
}
const (
OP_START = iota
OP_PLUS
OP_PLUS_O
OP_PLUS_OK
OP_MINUS
OP_MINUS_E
OP_MINUS_ER
OP_MINUS_ERR
OP_MINUS_ERR_SPC
MINUS_ERR_ARG
OP_M
OP_MS
OP_MSG
OP_MSG_SPC
MSG_ARG
MSG_PAYLOAD
MSG_END
OP_P
OP_PI
OP_PIN
OP_PING
OP_PO
OP_PON
OP_PONG
OP_I
OP_IN
OP_INF
OP_INFO
OP_INFO_SPC
INFO_ARG
)
// parse is the fast protocol parser engine.
func (nc *Conn) parse(buf []byte) error {
var i int
var b byte
// Move to loop instead of range syntax to allow jumping of i
for i = 0; i < len(buf); i++ {
b = buf[i]
switch nc.ps.state {
case OP_START:
switch b {
case 'M', 'm':
nc.ps.state = OP_M
case 'P', 'p':
nc.ps.state = OP_P
case '+':
nc.ps.state = OP_PLUS
case '-':
nc.ps.state = OP_MINUS
case 'I', 'i':
nc.ps.state = OP_I
default:
goto parseErr
}
case OP_M:
switch b {
case 'S', 's':
nc.ps.state = OP_MS
default:
goto parseErr
}
case OP_MS:
switch b {
case 'G', 'g':
nc.ps.state = OP_MSG
default:
goto parseErr
}
case OP_MSG:
switch b {
case ' ', '\t':
nc.ps.state = OP_MSG_SPC
default:
goto parseErr
}
case OP_MSG_SPC:
switch b {
case ' ', '\t':
continue
default:
nc.ps.state = MSG_ARG
nc.ps.as = i
}
case MSG_ARG:
switch b {
case '\r':
nc.ps.drop = 1
case '\n':
var arg []byte
if nc.ps.argBuf != nil {
arg = nc.ps.argBuf
} else {
arg = buf[nc.ps.as : i-nc.ps.drop]
}
if err := nc.processMsgArgs(arg); err != nil {
return err
}
nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, MSG_PAYLOAD
// jump ahead with the index. If this overruns
// what is left we fall out and process split
// buffer.
i = nc.ps.as + nc.ps.ma.size - 1
default:
if nc.ps.argBuf != nil {
nc.ps.argBuf = append(nc.ps.argBuf, b)
}
}
case MSG_PAYLOAD:
if nc.ps.msgBuf != nil {
if len(nc.ps.msgBuf) >= nc.ps.ma.size {
nc.processMsg(nc.ps.msgBuf)
nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END
} else {
// copy as much as we can to the buffer and skip ahead.
toCopy := nc.ps.ma.size - len(nc.ps.msgBuf)
avail := len(buf) - i
if avail < toCopy {
toCopy = avail
}
if toCopy > 0 {
start := len(nc.ps.msgBuf)
// This is needed for copy to work.
nc.ps.msgBuf = nc.ps.msgBuf[:start+toCopy]
copy(nc.ps.msgBuf[start:], buf[i:i+toCopy])
// Update our index
i = (i + toCopy) - 1
} else {
nc.ps.msgBuf = append(nc.ps.msgBuf, b)
}
}
} else if i-nc.ps.as >= nc.ps.ma.size {
nc.processMsg(buf[nc.ps.as:i])
nc.ps.argBuf, nc.ps.msgBuf, nc.ps.state = nil, nil, MSG_END
}
case MSG_END:
switch b {
case '\n':
nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START
default:
continue
}
case OP_PLUS:
switch b {
case 'O', 'o':
nc.ps.state = OP_PLUS_O
default:
goto parseErr
}
case OP_PLUS_O:
switch b {
case 'K', 'k':
nc.ps.state = OP_PLUS_OK
default:
goto parseErr
}
case OP_PLUS_OK:
switch b {
case '\n':
nc.processOK()
nc.ps.drop, nc.ps.state = 0, OP_START
}
case OP_MINUS:
switch b {
case 'E', 'e':
nc.ps.state = OP_MINUS_E
default:
goto parseErr
}
case OP_MINUS_E:
switch b {
case 'R', 'r':
nc.ps.state = OP_MINUS_ER
default:
goto parseErr
}
case OP_MINUS_ER:
switch b {
case 'R', 'r':
nc.ps.state = OP_MINUS_ERR
default:
goto parseErr
}
case OP_MINUS_ERR:
switch b {
case ' ', '\t':
nc.ps.state = OP_MINUS_ERR_SPC
default:
goto parseErr
}
case OP_MINUS_ERR_SPC:
switch b {
case ' ', '\t':
continue
default:
nc.ps.state = MINUS_ERR_ARG
nc.ps.as = i
}
case MINUS_ERR_ARG:
switch b {
case '\r':
nc.ps.drop = 1
case '\n':
var arg []byte
if nc.ps.argBuf != nil {
arg = nc.ps.argBuf
nc.ps.argBuf = nil
} else {
arg = buf[nc.ps.as : i-nc.ps.drop]
}
nc.processErr(string(arg))
nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START
default:
if nc.ps.argBuf != nil {
nc.ps.argBuf = append(nc.ps.argBuf, b)
}
}
case OP_P:
switch b {
case 'I', 'i':
nc.ps.state = OP_PI
case 'O', 'o':
nc.ps.state = OP_PO
default:
goto parseErr
}
case OP_PO:
switch b {
case 'N', 'n':
nc.ps.state = OP_PON
default:
goto parseErr
}
case OP_PON:
switch b {
case 'G', 'g':
nc.ps.state = OP_PONG
default:
goto parseErr
}
case OP_PONG:
switch b {
case '\n':
nc.processPong()
nc.ps.drop, nc.ps.state = 0, OP_START
}
case OP_PI:
switch b {
case 'N', 'n':
nc.ps.state = OP_PIN
default:
goto parseErr
}
case OP_PIN:
switch b {
case 'G', 'g':
nc.ps.state = OP_PING
default:
goto parseErr
}
case OP_PING:
switch b {
case '\n':
nc.processPing()
nc.ps.drop, nc.ps.state = 0, OP_START
}
case OP_I:
switch b {
case 'N', 'n':
nc.ps.state = OP_IN
default:
goto parseErr
}
case OP_IN:
switch b {
case 'F', 'f':
nc.ps.state = OP_INF
default:
goto parseErr
}
case OP_INF:
switch b {
case 'O', 'o':
nc.ps.state = OP_INFO
default:
goto parseErr
}
case OP_INFO:
switch b {
case ' ', '\t':
nc.ps.state = OP_INFO_SPC
default:
goto parseErr
}
case OP_INFO_SPC:
switch b {
case ' ', '\t':
continue
default:
nc.ps.state = INFO_ARG
nc.ps.as = i
}
case INFO_ARG:
switch b {
case '\r':
nc.ps.drop = 1
case '\n':
var arg []byte
if nc.ps.argBuf != nil {
arg = nc.ps.argBuf
nc.ps.argBuf = nil
} else {
arg = buf[nc.ps.as : i-nc.ps.drop]
}
nc.processAsyncInfo(arg)
nc.ps.drop, nc.ps.as, nc.ps.state = 0, i+1, OP_START
default:
if nc.ps.argBuf != nil {
nc.ps.argBuf = append(nc.ps.argBuf, b)
}
}
default:
goto parseErr
}
}
// Check for split buffer scenarios
if (nc.ps.state == MSG_ARG || nc.ps.state == MINUS_ERR_ARG || nc.ps.state == INFO_ARG) && nc.ps.argBuf == nil {
nc.ps.argBuf = nc.ps.scratch[:0]
nc.ps.argBuf = append(nc.ps.argBuf, buf[nc.ps.as:i-nc.ps.drop]...)
// FIXME, check max len
}
// Check for split msg
if nc.ps.state == MSG_PAYLOAD && nc.ps.msgBuf == nil {
// We need to clone the msgArg if it is still referencing the
// read buffer and we are not able to process the msg.
if nc.ps.argBuf == nil {
nc.cloneMsgArg()
}
// If we will overflow the scratch buffer, just create a
// new buffer to hold the split message.
if nc.ps.ma.size > cap(nc.ps.scratch)-len(nc.ps.argBuf) {
lrem := len(buf[nc.ps.as:])
nc.ps.msgBuf = make([]byte, lrem, nc.ps.ma.size)
copy(nc.ps.msgBuf, buf[nc.ps.as:])
} else {
nc.ps.msgBuf = nc.ps.scratch[len(nc.ps.argBuf):len(nc.ps.argBuf)]
nc.ps.msgBuf = append(nc.ps.msgBuf, (buf[nc.ps.as:])...)
}
}
return nil
parseErr:
return fmt.Errorf("nats: Parse Error [%d]: '%s'", nc.ps.state, buf[i:])
}
// cloneMsgArg is used when the split buffer scenario has the pubArg in the existing read buffer, but
// we need to hold onto it into the next read.
func (nc *Conn) cloneMsgArg() {
nc.ps.argBuf = nc.ps.scratch[:0]
nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.subject...)
nc.ps.argBuf = append(nc.ps.argBuf, nc.ps.ma.reply...)
nc.ps.ma.subject = nc.ps.argBuf[:len(nc.ps.ma.subject)]
if nc.ps.ma.reply != nil {
nc.ps.ma.reply = nc.ps.argBuf[len(nc.ps.ma.subject):]
}
}
const argsLenMax = 4
func (nc *Conn) processMsgArgs(arg []byte) error {
// Unroll splitArgs to avoid runtime/heap issues
a := [argsLenMax][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}
switch len(args) {
case 3:
nc.ps.ma.subject = args[0]
nc.ps.ma.sid = parseInt64(args[1])
nc.ps.ma.reply = nil
nc.ps.ma.size = int(parseInt64(args[2]))
case 4:
nc.ps.ma.subject = args[0]
nc.ps.ma.sid = parseInt64(args[1])
nc.ps.ma.reply = args[2]
nc.ps.ma.size = int(parseInt64(args[3]))
default:
return fmt.Errorf("nats: processMsgArgs Parse Error: '%s'", arg)
}
if nc.ps.ma.sid < 0 {
return fmt.Errorf("nats: processMsgArgs Bad or Missing Sid: '%s'", arg)
}
if nc.ps.ma.size < 0 {
return fmt.Errorf("nats: processMsgArgs Bad or Missing Size: '%s'", arg)
}
return nil
}
// Ascii numbers 0-9
const (
ascii_0 = 48
ascii_9 = 57
)
// parseInt64 expects decimal positive numbers. We
// return -1 to signal error
func parseInt64(d []byte) (n int64) {
if len(d) == 0 {
return -1
}
for _, dec := range d {
if dec < ascii_0 || dec > ascii_9 {
return -1
}
n = n*10 + (int64(dec) - ascii_0)
}
return n
}

43
vendor/github.com/nats-io/go-nats/timer.go generated vendored Normal file

@ -0,0 +1,43 @@
package nats
import (
"sync"
"time"
)
// Global pool of *time.Timer's; can be used by multiple goroutines concurrently.
var globalTimerPool timerPool
// timerPool provides GC-able pooling of *time.Timer's. It can be used by
// multiple goroutines concurrently.
type timerPool struct {
p sync.Pool
}
// Get returns a timer that completes after the given duration.
func (tp *timerPool) Get(d time.Duration) *time.Timer {
if t, _ := tp.p.Get().(*time.Timer); t != nil {
t.Reset(d)
return t
}
return time.NewTimer(d)
}
// Put pools the given timer.
//
// There is no need to call t.Stop() before calling Put.
//
// Put will try to stop the timer before pooling. If the
// given timer already expired, Put will read the unreceived
// value if there is one.
func (tp *timerPool) Put(t *time.Timer) {
if !t.Stop() {
select {
case <-t.C:
default:
}
}
tp.p.Put(t)
}
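The Get/Put discipline above (stop, then drain t.C before pooling) is the part that is easy to get wrong. A standalone sketch of the same pattern, reproduced outside the package since timerPool is unexported:

package main

import (
	"fmt"
	"sync"
	"time"
)

var pool sync.Pool

func getTimer(d time.Duration) *time.Timer {
	if t, _ := pool.Get().(*time.Timer); t != nil {
		t.Reset(d)
		return t
	}
	return time.NewTimer(d)
}

func putTimer(t *time.Timer) {
	if !t.Stop() {
		select {
		case <-t.C: // drain a fired-but-unread timer
		default:
		}
	}
	pool.Put(t)
}

func main() {
	t := getTimer(10 * time.Millisecond)
	<-t.C
	putTimer(t)
	t = getTimer(10 * time.Millisecond)
	<-t.C
	fmt.Println("timer reused safely")
}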

37
vendor/github.com/nats-io/go-nats/util/tls.go generated vendored Normal file

@ -0,0 +1,37 @@
// Copyright 2016 Apcera Inc. All rights reserved.
// +build go1.7
package util
import (
"crypto/tls"
)
// CloneTLSConfig returns a copy of c. Only the exported fields are copied.
// This is temporary, until this is provided by the language.
// https://go-review.googlesource.com/#/c/28075/
func CloneTLSConfig(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled,
Renegotiation: c.Renegotiation,
}
}

35
vendor/github.com/nats-io/go-nats/util/tls_pre17.go generated vendored Normal file

@ -0,0 +1,35 @@
// Copyright 2016 Apcera Inc. All rights reserved.
// +build go1.5,!go1.7
package util
import (
"crypto/tls"
)
// CloneTLSConfig returns a copy of c. Only the exported fields are copied.
// This is temporary, until this is provided by the language.
// https://go-review.googlesource.com/#/c/28075/
func CloneTLSConfig(c *tls.Config) *tls.Config {
return &tls.Config{
Rand: c.Rand,
Time: c.Time,
Certificates: c.Certificates,
NameToCertificate: c.NameToCertificate,
GetCertificate: c.GetCertificate,
RootCAs: c.RootCAs,
NextProtos: c.NextProtos,
ServerName: c.ServerName,
ClientAuth: c.ClientAuth,
ClientCAs: c.ClientCAs,
InsecureSkipVerify: c.InsecureSkipVerify,
CipherSuites: c.CipherSuites,
PreferServerCipherSuites: c.PreferServerCipherSuites,
SessionTicketsDisabled: c.SessionTicketsDisabled,
SessionTicketKey: c.SessionTicketKey,
ClientSessionCache: c.ClientSessionCache,
MinVersion: c.MinVersion,
MaxVersion: c.MaxVersion,
CurvePreferences: c.CurvePreferences,
}
}

124
vendor/github.com/nats-io/nuid/nuid.go generated vendored Normal file

@ -0,0 +1,124 @@
// Copyright 2016 Apcera Inc. All rights reserved.
// A unique identifier generator that is high performance, very fast, and tries to be entropy pool friendly.
package nuid
import (
"crypto/rand"
"fmt"
"math"
"math/big"
"sync"
"time"
prand "math/rand"
)
// NUID needs to be very fast to generate and truly unique, all while being entropy pool friendly.
// We will use 12 bytes of crypto generated data (entropy draining), and 10 bytes of sequential data
// that is started at a pseudo random number and increments with a pseudo-random increment.
// Total is 22 bytes of base 62 ascii text :)
// Version of the library
const Version = "1.0.0"
const (
digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz"
base = 62
preLen = 12
seqLen = 10
maxSeq = int64(839299365868340224) // base^seqLen == 62^10
minInc = int64(33)
maxInc = int64(333)
totalLen = preLen + seqLen
)
type NUID struct {
pre []byte
seq int64
inc int64
}
type lockedNUID struct {
sync.Mutex
*NUID
}
// Global NUID
var globalNUID *lockedNUID
// Seed sequential random with crypto or math/rand and current time
// and generate crypto prefix.
func init() {
r, err := rand.Int(rand.Reader, big.NewInt(math.MaxInt64))
if err != nil {
prand.Seed(time.Now().UnixNano())
} else {
prand.Seed(r.Int64())
}
globalNUID = &lockedNUID{NUID: New()}
globalNUID.RandomizePrefix()
}
// New will generate a new NUID and properly initialize the prefix, sequential start, and sequential increment.
func New() *NUID {
n := &NUID{
seq: prand.Int63n(maxSeq),
inc: minInc + prand.Int63n(maxInc-minInc),
pre: make([]byte, preLen),
}
n.RandomizePrefix()
return n
}
// Generate the next NUID string from the global locked NUID instance.
func Next() string {
globalNUID.Lock()
nuid := globalNUID.Next()
globalNUID.Unlock()
return nuid
}
// Generate the next NUID string.
func (n *NUID) Next() string {
// Increment and capture.
n.seq += n.inc
if n.seq >= maxSeq {
n.RandomizePrefix()
n.resetSequential()
}
seq := n.seq
// Copy prefix
var b [totalLen]byte
bs := b[:preLen]
copy(bs, n.pre)
// copy in the seq in base 62.
for i, l := len(b), seq; i > preLen; l /= base {
i--
b[i] = digits[l%base]
}
return string(b[:])
}
// Resets the sequential portion of the NUID.
func (n *NUID) resetSequential() {
n.seq = prand.Int63n(maxSeq)
n.inc = minInc + prand.Int63n(maxInc-minInc)
}
// Generate a new prefix from crypto/rand.
// This call *can* drain entropy and will be called automatically when we exhaust the sequential range.
// Will panic if it gets an error from rand.Read()
func (n *NUID) RandomizePrefix() {
var cb [preLen]byte
cbs := cb[:]
if nb, err := rand.Read(cbs); nb != preLen || err != nil {
panic(fmt.Sprintf("nuid: failed generating crypto random number: %v\n", err))
}
for i := 0; i < preLen; i++ {
n.pre[i] = digits[int(cbs[i])%base]
}
}
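Using the generator above; the global Next() is safe for concurrent use, a dedicated *NUID is not:

package main

import (
	"fmt"

	"github.com/nats-io/nuid"
)

func main() {
	// Global helper, safe for concurrent use.
	fmt.Println(nuid.Next()) // 22 base-62 characters

	// Or a dedicated generator when you don't want the global lock.
	n := nuid.New()
	fmt.Println(n.Next())
}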

27
vendor/github.com/robfig/cron/constantdelay.go generated vendored Normal file

@ -0,0 +1,27 @@
package cron
import "time"
// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
// It does not support jobs more frequent than once a second.
type ConstantDelaySchedule struct {
Delay time.Duration
}
// Every returns a crontab Schedule that activates once every duration.
// Delays of less than a second are not supported (will round up to 1 second).
// Any fields less than a Second are truncated.
func Every(duration time.Duration) ConstantDelaySchedule {
if duration < time.Second {
duration = time.Second
}
return ConstantDelaySchedule{
Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
}
}
// Next returns the next time this should be run.
// This rounds so that the next activation time will be on the second.
func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
}
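A sketch combining Every with the Cron runner from cron.go below; Stop() is assumed to exist past the end of this excerpt:

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron"
)

func main() {
	c := cron.New()
	// Sub-second delays round up to one second; see Every above.
	c.Schedule(cron.Every(5*time.Second), cron.FuncJob(func() {
		fmt.Println("tick", time.Now().Format(time.RFC3339))
	}))
	c.Start()
	time.Sleep(12 * time.Second) // let it fire twice
	c.Stop()                     // assumed; defined beyond this excerpt
}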

259
vendor/github.com/robfig/cron/cron.go generated vendored Normal file

@ -0,0 +1,259 @@
package cron
import (
"log"
"runtime"
"sort"
"time"
)
// Cron keeps track of any number of entries, invoking the associated func as
// specified by the schedule. It may be started, stopped, and the entries may
// be inspected while running.
type Cron struct {
entries []*Entry
stop chan struct{}
add chan *Entry
snapshot chan []*Entry
running bool
ErrorLog *log.Logger
location *time.Location
}
// Job is an interface for submitted cron jobs.
type Job interface {
Run()
}
// The Schedule describes a job's duty cycle.
type Schedule interface {
// Return the next activation time, later than the given time.
// Next is invoked initially, and then each time the job is run.
Next(time.Time) time.Time
}
// Entry consists of a schedule and the func to execute on that schedule.
type Entry struct {
// The schedule on which this job should be run.
Schedule Schedule
// The next time the job will run. This is the zero time if Cron has not been
// started or this entry's schedule is unsatisfiable.
Next time.Time
// The last time this job was run. This is the zero time if the job has never
// been run.
Prev time.Time
// The Job to run.
Job Job
}
// byTime is a wrapper for sorting the entry array by time
// (with zero time at the end).
type byTime []*Entry
func (s byTime) Len() int { return len(s) }
func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byTime) Less(i, j int) bool {
// Two zero times should return false.
// Otherwise, zero is "greater" than any other time.
// (To sort it at the end of the list.)
if s[i].Next.IsZero() {
return false
}
if s[j].Next.IsZero() {
return true
}
return s[i].Next.Before(s[j].Next)
}
// New returns a new Cron job runner, in the Local time zone.
func New() *Cron {
return NewWithLocation(time.Now().Location())
}
// NewWithLocation returns a new Cron job runner.
func NewWithLocation(location *time.Location) *Cron {
return &Cron{
entries: nil,
add: make(chan *Entry),
stop: make(chan struct{}),
snapshot: make(chan []*Entry),
running: false,
ErrorLog: nil,
location: location,
}
}
// A wrapper that turns a func() into a cron.Job
type FuncJob func()
func (f FuncJob) Run() { f() }
// AddFunc adds a func to the Cron to be run on the given schedule.
func (c *Cron) AddFunc(spec string, cmd func()) error {
return c.AddJob(spec, FuncJob(cmd))
}
// AddJob adds a Job to the Cron to be run on the given schedule.
func (c *Cron) AddJob(spec string, cmd Job) error {
schedule, err := Parse(spec)
if err != nil {
return err
}
c.Schedule(schedule, cmd)
return nil
}
// Schedule adds a Job to the Cron to be run on the given schedule.
func (c *Cron) Schedule(schedule Schedule, cmd Job) {
entry := &Entry{
Schedule: schedule,
Job: cmd,
}
if !c.running {
c.entries = append(c.entries, entry)
return
}
c.add <- entry
}
// Entries returns a snapshot of the cron entries.
func (c *Cron) Entries() []*Entry {
if c.running {
c.snapshot <- nil
x := <-c.snapshot
return x
}
return c.entrySnapshot()
}
// Location gets the time zone location
func (c *Cron) Location() *time.Location {
return c.location
}
// Start the cron scheduler in its own goroutine, or no-op if already started.
func (c *Cron) Start() {
if c.running {
return
}
c.running = true
go c.run()
}
// Run the cron scheduler, or no-op if already running.
func (c *Cron) Run() {
if c.running {
return
}
c.running = true
c.run()
}
func (c *Cron) runWithRecovery(j Job) {
defer func() {
if r := recover(); r != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
c.logf("cron: panic running job: %v\n%s", r, buf)
}
}()
j.Run()
}
// Run the scheduler. This is private just due to the need to synchronize
// access to the 'running' state variable.
func (c *Cron) run() {
// Figure out the next activation times for each entry.
now := c.now()
for _, entry := range c.entries {
entry.Next = entry.Schedule.Next(now)
}
for {
// Determine the next entry to run.
sort.Sort(byTime(c.entries))
var timer *time.Timer
if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
// If there are no entries yet, just sleep - it still handles new entries
// and stop requests.
timer = time.NewTimer(100000 * time.Hour)
} else {
timer = time.NewTimer(c.entries[0].Next.Sub(now))
}
for {
select {
case now = <-timer.C:
now = now.In(c.location)
// Run every entry whose next time was less than now
for _, e := range c.entries {
if e.Next.After(now) || e.Next.IsZero() {
break
}
go c.runWithRecovery(e.Job)
e.Prev = e.Next
e.Next = e.Schedule.Next(now)
}
case newEntry := <-c.add:
timer.Stop()
now = c.now()
newEntry.Next = newEntry.Schedule.Next(now)
c.entries = append(c.entries, newEntry)
case <-c.snapshot:
c.snapshot <- c.entrySnapshot()
continue
case <-c.stop:
timer.Stop()
return
}
break
}
}
}
// Logs an error to stderr or to the configured error log
func (c *Cron) logf(format string, args ...interface{}) {
if c.ErrorLog != nil {
c.ErrorLog.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// Stop stops the cron scheduler if it is running; otherwise it does nothing.
func (c *Cron) Stop() {
if !c.running {
return
}
c.stop <- struct{}{}
c.running = false
}
// entrySnapshot returns a copy of the current cron entry list.
func (c *Cron) entrySnapshot() []*Entry {
entries := []*Entry{}
for _, e := range c.entries {
entries = append(entries, &Entry{
Schedule: e.Schedule,
Next: e.Next,
Prev: e.Prev,
Job: e.Job,
})
}
return entries
}
// now returns the current time in c's location.
func (c *Cron) now() time.Time {
return time.Now().In(c.location)
}
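
A minimal sketch of wiring the scheduler together (illustrative only; the 6-field spec format is documented in doc.go below):

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron" // the package vendored here
)

func main() {
	c := cron.New()

	// Six fields: second, minute, hour, day-of-month, month, day-of-week.
	if err := c.AddFunc("*/10 * * * * *", func() {
		fmt.Println("tick:", time.Now())
	}); err != nil {
		panic(err)
	}

	c.Start()
	defer c.Stop() // stops scheduling; does not cancel a job mid-run

	time.Sleep(35 * time.Second) // let a few activations fire
}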

129
vendor/github.com/robfig/cron/doc.go generated vendored Normal file

@ -0,0 +1,129 @@
/*
Package cron implements a cron spec parser and job runner.
Usage
Callers may register Funcs to be invoked on a given schedule. Cron will run
them in their own goroutines.
c := cron.New()
c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") })
c.AddFunc("@hourly", func() { fmt.Println("Every hour") })
c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") })
c.Start()
..
// Funcs are invoked in their own goroutine, asynchronously.
...
// Funcs may also be added to a running Cron
c.AddFunc("@daily", func() { fmt.Println("Every day") })
..
// Inspect the cron job entries' next and previous run times.
inspect(c.Entries())
..
c.Stop() // Stop the scheduler (does not stop any jobs already running).
CRON Expression Format
A cron expression represents a set of times, using 6 space-separated fields.
Field name | Mandatory? | Allowed values | Allowed special characters
---------- | ---------- | -------------- | --------------------------
Seconds | Yes | 0-59 | * / , -
Minutes | Yes | 0-59 | * / , -
Hours | Yes | 0-23 | * / , -
Day of month | Yes | 1-31 | * / , - ?
Month | Yes | 1-12 or JAN-DEC | * / , -
Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun",
and "sun" are equally accepted.
Special Characters
Asterisk ( * )
The asterisk indicates that the cron expression will match for all values of the
field; e.g., using an asterisk in the 5th field (month) would indicate every
month.
Slash ( / )
Slashes are used to describe increments of ranges. For example 3-59/15 in the
2nd field (minutes) would indicate the 3rd minute of the hour and every 15
minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
that is, an increment over the largest possible range of the field. The form
"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
increment until the end of that specific range. It does not wrap around.
Comma ( , )
Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
the 6th field (day of week) would mean Mondays, Wednesdays and Fridays.
Hyphen ( - )
Hyphens are used to define ranges. For example, 9-17 would indicate every
hour between 9am and 5pm inclusive.
Question mark ( ? )
Question mark may be used instead of '*' for leaving either day-of-month or
day-of-week blank.
Predefined schedules
You may use one of several pre-defined schedules in place of a cron expression.
Entry | Description | Equivalent To
----- | ----------- | -------------
@yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 0 1 1 *
@monthly | Run once a month, midnight, first of month | 0 0 0 1 * *
@weekly | Run once a week, midnight on Sunday | 0 0 0 * * 0
@daily (or @midnight) | Run once a day, midnight | 0 0 0 * * *
@hourly | Run once an hour, beginning of hour | 0 0 * * * *
Intervals
You may also schedule a job to execute at fixed intervals, starting at the time it's added
or cron is run. This is supported by formatting the cron spec like this:
@every <duration>
where "duration" is a string accepted by time.ParseDuration
(http://golang.org/pkg/time/#ParseDuration).
For example, "@every 1h30m10s" would indicate a schedule that activates immediately,
and then every 1 hour, 30 minutes, 10 seconds.
Note: The interval does not take the job runtime into account. For example,
if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
it will have only 2 minutes of idle time between each run.
Time zones
All interpretation and scheduling is done in the machine's local time zone (as
provided by the Go time package (http://www.golang.org/pkg/time)).
Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
not be run!
Thread safety
Since the Cron service runs concurrently with the calling code, some amount of
care must be taken to ensure proper synchronization.
All cron methods are designed to be correctly synchronized as long as the caller
ensures that invocations have a clear happens-before ordering between them.
Implementation
Cron entries are stored in an array, sorted by their next activation time. Cron
sleeps until the next job is due to be run.
Upon waking:
- it runs each entry that is active on that second
- it calculates the next run times for the jobs that were run
- it re-sorts the array of entries by next activation time.
- it goes to sleep until the soonest job.
*/
package cron
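
To make the slash semantics concrete, a small sketch (not part of this commit) that walks the "3-59/15" example through Parse and Next:

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron" // the package vendored here
)

func main() {
	// "3-59/15" in the minutes field: minute 3, then every 15 minutes after.
	sched, err := cron.Parse("0 3-59/15 * * * *")
	if err != nil {
		panic(err)
	}

	t := time.Date(2017, 8, 18, 12, 0, 0, 0, time.UTC)
	for i := 0; i < 4; i++ {
		t = sched.Next(t)
		fmt.Println(t) // 12:03:00, 12:18:00, 12:33:00, 12:48:00
	}
}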

380
vendor/github.com/robfig/cron/parser.go generated vendored Normal file

@ -0,0 +1,380 @@
package cron
import (
"fmt"
"math"
"strconv"
"strings"
"time"
)
// Configuration options for creating a parser. Most options specify which
// fields should be included, while others enable features. If a field is not
// included the parser will assume a default value. These options do not change
the order fields are parsed in.
type ParseOption int
const (
Second ParseOption = 1 << iota // Seconds field, default 0
Minute // Minutes field, default 0
Hour // Hours field, default 0
Dom // Day of month field, default *
Month // Month field, default *
Dow // Day of week field, default *
DowOptional // Optional day of week field, default *
Descriptor // Allow descriptors such as @monthly, @weekly, etc.
)
var places = []ParseOption{
Second,
Minute,
Hour,
Dom,
Month,
Dow,
}
var defaults = []string{
"0",
"0",
"0",
"*",
"*",
"*",
}
// A custom Parser that can be configured.
type Parser struct {
options ParseOption
optionals int
}
// Creates a custom Parser with custom options.
//
// // Standard parser without descriptors
// specParser := NewParser(Minute | Hour | Dom | Month | Dow)
// sched, err := specParser.Parse("0 0 15 */3 *")
//
// // Same as above, just excludes time fields
// subsParser := NewParser(Dom | Month | Dow)
// sched, err := specParser.Parse("15 */3 *")
//
// // Same as above, just makes Dow optional
// subsParser := NewParser(Dom | Month | DowOptional)
// sched, err := specParser.Parse("15 */3")
//
func NewParser(options ParseOption) Parser {
optionals := 0
if options&DowOptional > 0 {
options |= Dow
optionals++
}
return Parser{options, optionals}
}
// Parse returns a new crontab schedule representing the given spec.
// It returns a descriptive error if the spec is not valid.
// It accepts crontab specs and features configured by NewParser.
func (p Parser) Parse(spec string) (Schedule, error) {
if len(spec) == 0 {
return nil, fmt.Errorf("Empty spec string")
}
if spec[0] == '@' && p.options&Descriptor > 0 {
return parseDescriptor(spec)
}
// Figure out how many fields we need
max := 0
for _, place := range places {
if p.options&place > 0 {
max++
}
}
min := max - p.optionals
// Split fields on whitespace
fields := strings.Fields(spec)
// Validate number of fields
if count := len(fields); count < min || count > max {
if min == max {
return nil, fmt.Errorf("Expected exactly %d fields, found %d: %s", min, count, spec)
}
return nil, fmt.Errorf("Expected %d to %d fields, found %d: %s", min, max, count, spec)
}
// Fill in missing fields
fields = expandFields(fields, p.options)
var err error
field := func(field string, r bounds) uint64 {
if err != nil {
return 0
}
var bits uint64
bits, err = getField(field, r)
return bits
}
var (
second = field(fields[0], seconds)
minute = field(fields[1], minutes)
hour = field(fields[2], hours)
dayofmonth = field(fields[3], dom)
month = field(fields[4], months)
dayofweek = field(fields[5], dow)
)
if err != nil {
return nil, err
}
return &SpecSchedule{
Second: second,
Minute: minute,
Hour: hour,
Dom: dayofmonth,
Month: month,
Dow: dayofweek,
}, nil
}
func expandFields(fields []string, options ParseOption) []string {
n := 0
count := len(fields)
expFields := make([]string, len(places))
copy(expFields, defaults)
for i, place := range places {
if options&place > 0 {
expFields[i] = fields[n]
n++
}
if n == count {
break
}
}
return expFields
}
var standardParser = NewParser(
Minute | Hour | Dom | Month | Dow | Descriptor,
)
// ParseStandard returns a new crontab schedule representing the given standardSpec
// (https://en.wikipedia.org/wiki/Cron). It differs from Parse in that it requires
// the spec to always contain 5 entries representing: minute, hour, day of month,
// month and day of week, in that order. It returns a descriptive error if the
// spec is not valid.
//
// It accepts
// - Standard crontab specs, e.g. "* * * * ?"
// - Descriptors, e.g. "@midnight", "@every 1h30m"
func ParseStandard(standardSpec string) (Schedule, error) {
return standardParser.Parse(standardSpec)
}
var defaultParser = NewParser(
Second | Minute | Hour | Dom | Month | DowOptional | Descriptor,
)
// Parse returns a new crontab schedule representing the given spec.
// It returns a descriptive error if the spec is not valid.
//
// It accepts
// - Full crontab specs, e.g. "* * * * * ?"
// - Descriptors, e.g. "@midnight", "@every 1h30m"
func Parse(spec string) (Schedule, error) {
return defaultParser.Parse(spec)
}
// getField returns a uint64 with bits set representing all of the times that
// the field matches, or an error parsing the field value. A "field" is a
// comma-separated list of "ranges".
func getField(field string, r bounds) (uint64, error) {
var bits uint64
ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
for _, expr := range ranges {
bit, err := getRange(expr, r)
if err != nil {
return bits, err
}
bits |= bit
}
return bits, nil
}
// getRange returns the bits indicated by the given expression:
// number | number "-" number [ "/" number ]
// or an error parsing the range.
func getRange(expr string, r bounds) (uint64, error) {
var (
start, end, step uint
rangeAndStep = strings.Split(expr, "/")
lowAndHigh = strings.Split(rangeAndStep[0], "-")
singleDigit = len(lowAndHigh) == 1
err error
)
var extra uint64
if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
start = r.min
end = r.max
extra = starBit
} else {
start, err = parseIntOrName(lowAndHigh[0], r.names)
if err != nil {
return 0, err
}
switch len(lowAndHigh) {
case 1:
end = start
case 2:
end, err = parseIntOrName(lowAndHigh[1], r.names)
if err != nil {
return 0, err
}
default:
return 0, fmt.Errorf("Too many hyphens: %s", expr)
}
}
switch len(rangeAndStep) {
case 1:
step = 1
case 2:
step, err = mustParseInt(rangeAndStep[1])
if err != nil {
return 0, err
}
// Special handling: "N/step" means "N-max/step".
if singleDigit {
end = r.max
}
default:
return 0, fmt.Errorf("Too many slashes: %s", expr)
}
if start < r.min {
return 0, fmt.Errorf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
}
if end > r.max {
return 0, fmt.Errorf("End of range (%d) above maximum (%d): %s", end, r.max, expr)
}
if start > end {
return 0, fmt.Errorf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
}
if step == 0 {
return 0, fmt.Errorf("Step of range should be a positive number: %s", expr)
}
return getBits(start, end, step) | extra, nil
}
// parseIntOrName returns the (possibly-named) integer contained in expr.
func parseIntOrName(expr string, names map[string]uint) (uint, error) {
if names != nil {
if namedInt, ok := names[strings.ToLower(expr)]; ok {
return namedInt, nil
}
}
return mustParseInt(expr)
}
// mustParseInt parses the given expression as an int or returns an error.
func mustParseInt(expr string) (uint, error) {
num, err := strconv.Atoi(expr)
if err != nil {
return 0, fmt.Errorf("Failed to parse int from %s: %s", expr, err)
}
if num < 0 {
return 0, fmt.Errorf("Negative number (%d) not allowed: %s", num, expr)
}
return uint(num), nil
}
// getBits sets all bits in the range [min, max], modulo the given step size.
func getBits(min, max, step uint) uint64 {
var bits uint64
// If step is 1, use shifts.
if step == 1 {
return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
}
// Else, use a simple loop.
for i := min; i <= max; i += step {
bits |= 1 << i
}
return bits
}
// all returns all bits within the given bounds. (plus the star bit)
func all(r bounds) uint64 {
return getBits(r.min, r.max, 1) | starBit
}
// parseDescriptor returns a predefined schedule for the expression, or error if none matches.
func parseDescriptor(descriptor string) (Schedule, error) {
switch descriptor {
case "@yearly", "@annually":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: 1 << dom.min,
Month: 1 << months.min,
Dow: all(dow),
}, nil
case "@monthly":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: 1 << dom.min,
Month: all(months),
Dow: all(dow),
}, nil
case "@weekly":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: all(dom),
Month: all(months),
Dow: 1 << dow.min,
}, nil
case "@daily", "@midnight":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: all(dom),
Month: all(months),
Dow: all(dow),
}, nil
case "@hourly":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: all(hours),
Dom: all(dom),
Month: all(months),
Dow: all(dow),
}, nil
}
const every = "@every "
if strings.HasPrefix(descriptor, every) {
duration, err := time.ParseDuration(descriptor[len(every):])
if err != nil {
return nil, fmt.Errorf("Failed to parse duration %s: %s", descriptor, err)
}
return Every(duration), nil
}
return nil, fmt.Errorf("Unrecognized descriptor: %s", descriptor)
}
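
The practical difference between Parse and ParseStandard is easy to miss: both accept a 5-field spec (Dow is optional in the default parser), but they read the first field differently. A sketch (not part of this commit):

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron" // the package vendored here
)

func main() {
	// Default parser: first field is seconds, so this is "at second 30".
	s1, _ := cron.Parse("30 * * * *")
	// Standard parser: first field is minutes, so this is "at minute 30".
	s2, _ := cron.ParseStandard("30 * * * *")

	t := time.Date(2017, 8, 18, 12, 0, 0, 0, time.UTC)
	fmt.Println(s1.Next(t)) // 2017-08-18 12:00:30 +0000 UTC
	fmt.Println(s2.Next(t)) // 2017-08-18 12:30:00 +0000 UTC

	// A custom parser reading only minute and hour; the rest take defaults.
	p := cron.NewParser(cron.Minute | cron.Hour)
	s3, err := p.Parse("30 3")
	if err != nil {
		panic(err)
	}
	fmt.Println(s3.Next(t)) // 2017-08-19 03:30:00 +0000 UTC
}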

158
vendor/github.com/robfig/cron/spec.go generated vendored Normal file

@ -0,0 +1,158 @@
package cron
import "time"
// SpecSchedule specifies a duty cycle (to the second granularity), based on a
// traditional crontab specification. It is computed initially and stored as bit sets.
type SpecSchedule struct {
Second, Minute, Hour, Dom, Month, Dow uint64
}
// bounds provides a range of acceptable values (plus a map of name to value).
type bounds struct {
min, max uint
names map[string]uint
}
// The bounds for each field.
var (
seconds = bounds{0, 59, nil}
minutes = bounds{0, 59, nil}
hours = bounds{0, 23, nil}
dom = bounds{1, 31, nil}
months = bounds{1, 12, map[string]uint{
"jan": 1,
"feb": 2,
"mar": 3,
"apr": 4,
"may": 5,
"jun": 6,
"jul": 7,
"aug": 8,
"sep": 9,
"oct": 10,
"nov": 11,
"dec": 12,
}}
dow = bounds{0, 6, map[string]uint{
"sun": 0,
"mon": 1,
"tue": 2,
"wed": 3,
"thu": 4,
"fri": 5,
"sat": 6,
}}
)
const (
// Set the top bit if a star was included in the expression.
starBit = 1 << 63
)
// Next returns the next time this schedule is activated, greater than the given
// time. If no time can be found to satisfy the schedule, return the zero time.
func (s *SpecSchedule) Next(t time.Time) time.Time {
// General approach:
// For Month, Day, Hour, Minute, Second:
// Check if the time value matches. If yes, continue to the next field.
// If the field doesn't match the schedule, then increment the field until it matches.
// While incrementing the field, a wrap-around brings it back to the beginning
// of the field list (since it is necessary to re-verify previous field
// values)
// Start at the earliest possible time (the upcoming second).
t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
// This flag indicates whether a field has been incremented.
added := false
// If no time is found within five years, return zero.
yearLimit := t.Year() + 5
WRAP:
if t.Year() > yearLimit {
return time.Time{}
}
// Find the first applicable month.
// If it's this month, then do nothing.
for 1<<uint(t.Month())&s.Month == 0 {
// If we have to add a month, reset the other parts to 0.
if !added {
added = true
// Otherwise, set the date at the beginning (since the current time is irrelevant).
t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
}
t = t.AddDate(0, 1, 0)
// Wrapped around.
if t.Month() == time.January {
goto WRAP
}
}
// Now get a day in that month.
for !dayMatches(s, t) {
if !added {
added = true
t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
}
t = t.AddDate(0, 0, 1)
if t.Day() == 1 {
goto WRAP
}
}
for 1<<uint(t.Hour())&s.Hour == 0 {
if !added {
added = true
t = time.Date(t.Year(), t.Month(), t.Day(), t.Hour(), 0, 0, 0, t.Location())
}
t = t.Add(1 * time.Hour)
if t.Hour() == 0 {
goto WRAP
}
}
for 1<<uint(t.Minute())&s.Minute == 0 {
if !added {
added = true
t = t.Truncate(time.Minute)
}
t = t.Add(1 * time.Minute)
if t.Minute() == 0 {
goto WRAP
}
}
for 1<<uint(t.Second())&s.Second == 0 {
if !added {
added = true
t = t.Truncate(time.Second)
}
t = t.Add(1 * time.Second)
if t.Second() == 0 {
goto WRAP
}
}
return t
}
// dayMatches returns true if the schedule's day-of-week and day-of-month
// restrictions are satisfied by the given time.
func dayMatches(s *SpecSchedule, t time.Time) bool {
var (
domMatch bool = 1<<uint(t.Day())&s.Dom > 0
dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
)
if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
return domMatch && dowMatch
}
return domMatch || dowMatch
}
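
The dayMatches rule above has a subtle consequence worth a sketch (not part of this commit): when both day fields are restricted, matching either one is enough; when one is a star, only the other restricts the schedule.

package main

import (
	"fmt"
	"time"

	"github.com/robfig/cron" // the package vendored here
)

func main() {
	// Both day fields restricted: fires on the 1st of the month and also
	// on every Monday, whichever comes first.
	either, _ := cron.Parse("0 0 0 1 * MON")

	// Day-of-week left as a star: only day-of-month restricts the schedule.
	first, _ := cron.Parse("0 0 0 1 * *")

	t := time.Date(2017, 8, 18, 0, 0, 0, 0, time.UTC) // a Friday
	fmt.Println(either.Next(t)) // 2017-08-21 00:00:00 +0000 UTC (next Monday)
	fmt.Println(first.Next(t))  // 2017-09-01 00:00:00 +0000 UTC (next 1st)
}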